# coding=utf-8
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GPT-J model."""

import warnings
from typing import Optional, Union

import torch
import torch.fx
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_start_docstrings,
    auto_docstring,
    is_torch_flex_attn_available,
    is_torch_fx_proxy,
    logging,
)
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gptj import GPTJConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask

if is_flash_attn_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward


logger = logging.get_logger(__name__)


def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64) / dim))
    sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.int64).float(), inv_freq).float()
    return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)


@torch.fx.wrap
def get_embed_positions(embed_positions, position_ids):
    return embed_positions.to(position_ids.device).repeat(position_ids.shape[0], 1, 1)


def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')


def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
    sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
    cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
    return (tensor * cos) + (rotate_every_two(tensor) * sin)


class GPTJAttention(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        self.config = config
        max_positions = config.max_position_embeddings

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.is_causal = True
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
) self.embed_dim = config.hidden_size self.num_attention_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_attention_heads if self.head_dim * self.num_attention_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and" f" `num_attention_heads`: {self.num_attention_heads})." ) self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()) self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.rotary_dim = config.rotary_dim pos_embd_dim = self.rotary_dim or self.embed_dim self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim) def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary): """ Splits hidden dim into attn_head_size and num_attention_heads """ new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) tensor = tensor.view(new_shape) if rotary: return tensor if len(tensor.shape) == 5: return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features) elif len(tensor.shape) == 4: return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) else: raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}") def _merge_heads(self, tensor, num_attention_heads, attn_head_size): """ Merges attn_head_size dim and num_attn_heads dim into hidden dim """ if len(tensor.shape) == 5: tensor = tensor.permute(0, 1, 3, 2, 4).contiguous() elif len(tensor.shape) == 4: tensor = tensor.permute(0, 2, 1, 3).contiguous() else: raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}") new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,) return tensor.view(new_shape) def _attn( self, query, key, value, attention_mask=None, head_mask=None, ): # Keep the attention weights computation in fp32 to avoid overflow issues query = query.to(torch.float32) key = key.to(torch.float32) attn_weights = torch.matmul(query, key.transpose(-1, -2)) attn_weights = attn_weights / self.scale_attn if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.to(value.dtype) attn_weights = self.attn_dropout(attn_weights) # Mask heads if we want to if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _get_embed_positions(self, position_ids): embed_positions = self.embed_positions if embed_positions.device != position_ids.device: embed_positions = embed_positions.to(position_ids.device) self.embed_positions = embed_positions return embed_positions.repeat(position_ids.shape[0], 1, 1) def forward( self, hidden_states: torch.FloatTensor, layer_past: Optional[Cache] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, ) -> Union[ tuple[torch.Tensor, tuple[torch.Tensor]], 
Optional[tuple[torch.Tensor, tuple[torch.Tensor], tuple[torch.Tensor, ...]]], ]: query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): # The logic to conditionally copy to GPU could not be traced, so we do this # every time in the torch.fx case embed_positions = get_embed_positions(self.embed_positions, position_ids) else: embed_positions = self._get_embed_positions(position_ids) repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) sincos = torch.gather(embed_positions, 1, repeated_position_ids).to(key.dtype) sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, : self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim :] q_rot = query[:, :, :, : self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim :] k_rot = apply_rotary_pos_emb(k_rot, sin, cos) q_rot = apply_rotary_pos_emb(q_rot, sin, cos) key = torch.cat([k_rot, k_pass], dim=-1) query = torch.cat([q_rot, q_pass], dim=-1) else: key = apply_rotary_pos_emb(key, sin, cos) query = apply_rotary_pos_emb(query, sin, cos) key = key.permute(0, 2, 1, 3) query = query.permute(0, 2, 1, 3) if layer_past is not None: cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_dim, "cache_position": cache_position, } key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs) # compute self-attention: V x Softmax(QK^T) attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class GPTJFlashAttention2(GPTJAttention): """ GPTJ flash attention module. This module inherits from `GPTJAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() def forward( self, hidden_states: torch.FloatTensor, layer_past: Optional[Cache] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, ) -> Union[ tuple[torch.Tensor, tuple[torch.Tensor]], Optional[tuple[torch.Tensor, tuple[torch.Tensor], tuple[torch.Tensor, ...]]], ]: query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): # The logic to conditionally copy to GPU could not be traced, so we do this # every time in the torch.fx case embed_positions = get_embed_positions(self.embed_positions, position_ids) else: embed_positions = self._get_embed_positions(position_ids) repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) sincos = torch.gather(embed_positions, 1, repeated_position_ids).to(key.dtype) sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, : self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim :] q_rot = query[:, :, :, : self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim :] k_rot = apply_rotary_pos_emb(k_rot, sin, cos) q_rot = apply_rotary_pos_emb(q_rot, sin, cos) key = torch.cat([k_rot, k_pass], dim=-1) query = torch.cat([q_rot, q_pass], dim=-1) else: key = apply_rotary_pos_emb(key, sin, cos) query = apply_rotary_pos_emb(query, sin, cos) # tanspose to have the desired shape # before transpose: batch_size x seq_length x num_attention_heads x head_dim # after transpose: batch_size x num_attention_heads x seq_length x head_dim key = key.permute(0, 2, 1, 3) query = query.permute(0, 2, 1, 3) # value: batch_size x num_attention_heads x seq_length x head_dim if layer_past is not None: cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_dim, "cache_position": cache_position, } key, value = layer_past.update(key, value, self.layer_idx, cache_kwargs) # The Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we need to keep the original shape for query and key, and reshape value # to have the correct shape. key = key.permute(0, 2, 1, 3).contiguous() query = query.permute(0, 2, 1, 3).contiguous() value = value.permute(0, 2, 1, 3).contiguous() # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. 
(LlamaRMSNorm handles it correctly) input_dtype = query.dtype device_type = query.device.type if query.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query = query.to(target_dtype) key = key.to(target_dtype) value = value.to(target_dtype) attention_dropout = self.config.attn_pdrop if self.training else 0.0 # attn_pdrop in gptj query_length = query.shape[1] # Compute attention attn_weights = _flash_attention_forward( query, key, value, attention_mask, query_length, dropout=attention_dropout, is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) # Reshape outputs attn_output = attn_weights.reshape( attn_weights.shape[0], attn_weights.shape[1], attn_weights.shape[2] * attn_weights.shape[3] ) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights GPTJ_ATTENTION_CLASSES = { "eager": GPTJAttention, "flash_attention_2": GPTJFlashAttention2, } class GPTJMLP(nn.Module): def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim super().__init__() embed_dim = config.n_embd self.fc_in = nn.Linear(embed_dim, intermediate_size) self.fc_out = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTJBlock(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) self.mlp = GPTJMLP(inner_dim, config) def forward( self, hidden_states: Optional[torch.FloatTensor], layer_past: Optional[Cache] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs, attn_weights = self.attn( hidden_states=hidden_states, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = attn_outputs + feed_forward_hidden_states + residual return hidden_states, attn_weights @auto_docstring class 
GPTJPreTrainedModel(PreTrainedModel): config: GPTJConfig base_model_prefix = "transformer" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["GPTJBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _can_compile_fullgraph = True _supports_param_buffer_assignment = False def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear,)): # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (`dict[int, list]`, *optional*): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the following number of attention modules: - gpt-j-6B: 28 Example: ```python # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules: model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") device_map = { 0: [0, 1, 2, 3, 4, 5, 6], 1: [7, 8, 9, 10, 11, 12, 13], 2: [14, 15, 16, 17, 18, 19, 20], 3: [21, 22, 23, 24, 25, 26, 27], } model.parallelize(device_map) ``` """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to CPU from a model parallel state. 
Example: ```python # On a 4 GPU machine with gpt-j-6B: model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") device_map = { 0: [0, 1, 2, 3, 4, 5, 6], 1: [7, 8, 9, 10, 11, 12, 13], 2: [14, 15, 16, 17, 18, 19, 20], 3: [21, 22, 23, 24, 25, 26, 27], } model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() ``` """ @auto_docstring class GPTJModel(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.embed_dim = config.n_embd self.vocab_size = config.vocab_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTJBlock(config, layer_idx=i) for i in range(config.n_layer)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Model parallel self.model_parallel = False self.device_map = None self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," " ...}", FutureWarning, ) # Check validity of device_map self.device_map = ( get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.h)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) self.wte = self.wte.to(self.first_device) # Load onto devices for k, v in self.device_map.items(): for block in v: cuda_device = "cuda:" + str(k) self.h[block] = self.h[block].to(cuda_device) # ln_f to last self.ln_f = self.ln_f.to(self.last_device) @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" self.wte = self.wte.to("cpu") for index in range(len(self.h)): self.h[index] = self.h[index].to("cpu") self.ln_f = self.ln_f.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor]]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, 
*optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.wte(input_ids) # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() seq_length = inputs_embeds.shape[1] if cache_position is None: past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_attention_heads x N x N # head_mask has shape n_layer x batch x num_attention_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) hidden_states = inputs_embeds if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, seq_length) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = (-1, seq_length, hidden_states.size(-1)) all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure layer_past is on same device as hidden_states (might not be correct) if past_key_values is not None: for layer in past_key_values.layers: layer.keys = layer.keys.to(hidden_states.device) layer.values = layer.values.to(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if causal_mask is not None: causal_mask = causal_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + 
(outputs[1],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @auto_docstring( custom_intro=""" The GPT-J Model transformer with a language modeling head on top. """ ) class GPTJForCausalLM(GPTJPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPTJModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): warnings.warn( "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" " your model with `device_map='balanced'` in the call to `from_pretrained`. 
You can also provide your own" " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" " 0, 'transformer.h.1': 1, ...}", FutureWarning, ) self.device_map = ( get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.transformer.h)) self.transformer.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.transformer.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): warnings.warn( "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.transformer.deparallelize() self.transformer = self.transformer.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False torch.cuda.empty_cache() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor]]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Union[tuple, CausalLMOutputWithPast]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) # make sure sampling in fp16 works correctly and # compute loss in fp32 to match with mesh-tf version # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 lm_logits = self.lm_head(hidden_states).to(torch.float32) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Flatten the tokens loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring( custom_intro=""" The GPT-J Model transformer with a sequence classification head on top (linear layer). [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT, GPT-2, GPT-Neo) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). 
""" ) class GPTJForSequenceClassification(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTJModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(pooled_logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class GPTJForQuestionAnswering(GPTJPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTJModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Model parallel self.model_parallel = False self.device_map = None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_dim)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "GPTJForCausalLM", "GPTJForQuestionAnswering", "GPTJForSequenceClassification", "GPTJModel", "GPTJPreTrainedModel", ]
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/granitemoehybrid/modular_granitemoehybrid.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_granitemoehybrid.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 IBM and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Optional, TypedDict, Union import torch import torch.nn.functional as F from torch import nn from transformers.activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, MoeCausalLMOutputWithPast, MoeModelOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available from .configuration_granitemoehybrid import GraniteMoeHybridConfig if is_mamba_2_ssm_available(): from mamba_ssm.ops.triton.selective_state_update import selective_state_update from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined else: selective_state_update = None if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. 
For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # copied from transformers.models.granite.modeling_granite.GraniteAttention with Granite->GraniteMoeHybrid # no longer copied after attention refactors class GraniteMoeHybridAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.is_causal = True self.scaling = config.attention_multiplier if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # None or rope embeddings **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = position_embeddings if position_embeddings is not None else (None, None) if position_embeddings is not None: query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, attn_weights class HybridMambaAttentionDynamicCache: """ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache (which has a constant shape regardless of seq_len). This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. 
""" is_compileable = False def __init__(self, config: GraniteMoeHybridConfig, batch_size, dtype=torch.float16, device=None): self.layers_block_type = config.layers_block_type self.has_previous_state = False # only used by mamba conv_kernel_size = config.mamba_d_conv ssm_state_size = config.mamba_d_state self.conv_states = [] self.ssm_states = [] self.transformer_layers = [] for i in range(config.num_hidden_layers): if self.layers_block_type[i] == "mamba": self.conv_states += [ torch.zeros( batch_size, (config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size), conv_kernel_size, device=device, dtype=dtype, ) ] self.ssm_states += [ torch.zeros( batch_size, config.mamba_n_heads, config.mamba_d_head, ssm_state_size, device=device, dtype=dtype, ) ] else: self.conv_states += [torch.tensor([[]] * batch_size, device=device)] self.ssm_states += [torch.tensor([[]] * batch_size, device=device)] self.transformer_layers.append(i) self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: # Update the cache if self.key_cache[layer_idx].shape[-1] == 0: self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2) return self.key_cache[layer_idx], self.value_cache[layer_idx] def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.conv_states[layer_idx].device self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device)) device = self.ssm_states[layer_idx].device self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device)) def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx if len(self.key_cache) <= layer_idx: return 0 return self.key_cache[layer_idx].shape[-2] # Helper methods for segment sum computation def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int): """ Padding x tensor with `pad_size` on the seq_len dim (dim=1) Assumes that we only have tensors of either size 4 or 3 """ pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0) return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0) def reshape_into_chunks(input_tensor, pad_size, chunk_size): """ Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and simultaneously splitting it into chunk sequences. 
Assumes that we only have tensors of either size 4 or 3 """ # [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...] input_tensor = pad_tensor_by_size(input_tensor, pad_size) if len(input_tensor.shape) == 3: # [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads] return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2]) else: # [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size] return input_tensor.reshape( input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3] ) def segment_sum(input_tensor): """ More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions. """ chunk_size = input_tensor.size(-1) # 1. expand input tensor to have an additional dimension and repeat along that dimension # [..., chunk_size] -> [..., chunk_size, chunk_size] input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size) # 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1) input_tensor = input_tensor.masked_fill(~mask, 0) # 3. compute actual cumsum tensor_segsum = torch.cumsum(input_tensor, dim=-2) # 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time) mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0) tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf) return tensor_segsum is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update)) def apply_mask_to_padding_states(hidden_states, attention_mask): """ Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66 """ if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: dtype = hidden_states.dtype hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return hidden_states # Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer class GraniteMoeHybridMambaLayer(nn.Module): """ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. 
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces) The are a few differences between this and Mamba2Mixer: - The variable use_precomputed_states is slightly different due to the hybrid cache structure - There's a few non-obvious bugs fixed with batching in the slow path that exist in main - Some extra variables that our layer doesn't need have been removed - We ported most of the refactors in https://github.com/huggingface/transformers/pull/35154, which is (as of Dec 18, 2024) unmerged """ def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int): super().__init__() self.num_heads = config.mamba_n_heads self.hidden_size = config.hidden_size self.ssm_state_size = config.mamba_d_state self.conv_kernel_size = config.mamba_d_conv self.intermediate_size = int(config.mamba_expand * self.hidden_size) self.layer_idx = layer_idx self.use_conv_bias = config.mamba_conv_bias self.activation = config.hidden_act self.act = ACT2FN[config.hidden_act] self.use_bias = config.mamba_proj_bias self.layer_norm_epsilon = config.rms_norm_eps self.n_groups = config.mamba_n_groups self.head_dim = config.mamba_d_head self.chunk_size = config.mamba_chunk_size # FIXME: self.time_step_limit = (0.0, float("inf")) self.time_step_min = 0.001 self.time_step_max = 0.1 self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size self.conv1d = nn.Conv1d( in_channels=self.conv_dim, out_channels=self.conv_dim, bias=config.mamba_conv_bias, kernel_size=self.conv_kernel_size, groups=self.conv_dim, padding=self.conv_kernel_size - 1, ) # projection of the input hidden states projection_size = self.intermediate_size + self.conv_dim + self.num_heads self.in_proj = nn.Linear( self.hidden_size, projection_size, bias=self.use_bias, ) # selective projection used to make dt, B and C input dependent # time step projection (discretization) # instantiate once and copy inv_dt in init_weights of PretrainedModel self.dt_bias = nn.Parameter(torch.ones(self.num_heads)) # S4D real initialization. These are not discretized! # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded A = torch.arange(1, self.num_heads + 1) self.A_log = nn.Parameter(torch.log(A)) self.A_log._no_weight_decay = True self.norm = GraniteMoeHybridRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon) self.D = nn.Parameter(torch.ones(self.num_heads)) self.D._no_weight_decay = True self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias) if not is_fast_path_available: logger.warning_once( "The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`" " is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and" " https://github.com/Dao-AILab/causal-conv1d" ) else: logger.warning_once("The fast path for GraniteMoeHybrid will be used when running the model on a GPU") def cuda_kernels_forward( self, hidden_states: torch.Tensor, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, seq_idx: Optional[torch.IntTensor] = None, ): # 1. 
Gated MLP's linear projection hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask) projected_states = self.in_proj(hidden_states) # Set up dimensions for reshapes later batch_size, seq_len, _ = hidden_states.shape groups_time_state_size = self.n_groups * self.ssm_state_size use_precomputed_states = ( cache_params is not None and cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size and cache_position is not None and cache_position[0] > 0 ) # getting projected states from cache if it exists if use_precomputed_states: gate, hidden_states_B_C, dt = projected_states.squeeze(1).split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. Convolution sequence transformation hidden_states_B_C = causal_conv1d_update( hidden_states_B_C, cache_params.conv_states[self.layer_idx], self.conv1d.weight.squeeze(1), self.conv1d.bias, self.activation, ) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # (nheads,) A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) dt = dt[:, :, None].expand(-1, -1, self.head_dim) dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim) D = self.D[:, None, ...].expand(-1, self.head_dim) B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups) C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups) hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim) hidden_states = selective_state_update( cache_params.ssm_states[self.layer_idx], hidden_states_reshaped, dt, A, B, C, D, z=None, dt_bias=dt_bias, dt_softplus=True, ) hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim) hidden_states = self.norm(hidden_states, gate) # 4. Final linear projection out = self.out_proj(hidden_states)[:, None, ...] # Fused calculations or step by step if no initialized cache is found else: A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size) dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit} # 2-4. Fused kernel for conv1d, SSM, and the final projection if self.training and cache_params is None: out = mamba_split_conv1d_scan_combined( projected_states, self.conv1d.weight.squeeze(1), self.conv1d.bias, self.dt_bias, A, D=self.D, chunk_size=self.chunk_size, seq_idx=seq_idx, activation=self.activation, rmsnorm_weight=self.norm.weight, rmsnorm_eps=self.norm.variance_epsilon, outproj_weight=self.out_proj.weight, outproj_bias=self.out_proj.bias, headdim=self.head_dim, ngroups=self.n_groups, norm_before_gate=False, return_final_states=False, **dt_limit_kwargs, ) else: gate, hidden_states_B_C, dt = projected_states.split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) # 2. Convolution sequence transformation # Init cache if cache_params is not None: # storing the states # If we just take xBC[:, :, -self.d_conv :], it will error if seqlen < self.d_conv # Instead F.pad will pad with zeros if seqlen < self.d_conv, and truncate otherwise. 
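                    # Illustration of the padding performed just below (a hedged note, not from the upstream file):
                    # with conv_kernel_size=4 and an incoming seq_len of 2, the transposed tensor has shape
                    # (batch, conv_dim, 2) and is left-padded with 4 - 2 = 2 zeros, so the cached conv state
                    # always has shape (batch, conv_dim, conv_kernel_size).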
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0), ) cache_params.conv_states[self.layer_idx].copy_(conv_states) if self.activation not in ["silu", "swish"]: hidden_states_B_C = self.act( self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2) ) else: hidden_states_B_C = causal_conv1d_fn( x=hidden_states_B_C.transpose(1, 2), weight=self.conv1d.weight.squeeze(1), bias=self.conv1d.bias, activation=self.activation, seq_idx=seq_idx, ).transpose(1, 2) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, groups_time_state_size, groups_time_state_size], dim=-1, ) # 3. SSM transformation scan_output, ssm_state = mamba_chunk_scan_combined( hidden_states.view(batch_size, seq_len, -1, self.head_dim), dt, A, B.view(batch_size, seq_len, self.n_groups, -1), C.view(batch_size, seq_len, self.n_groups, -1), chunk_size=self.chunk_size, D=self.D, z=None, seq_idx=seq_idx, return_final_states=True, dt_bias=self.dt_bias, dt_softplus=True, **dt_limit_kwargs, ) # Init cache if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) scan_output = scan_output.view(batch_size, seq_len, -1) # Multiply "gate" branch and apply extra normalization layer scan_output = self.norm(scan_output, gate) # 4. Final linear projection out = self.out_proj(scan_output) return out # fmt: off def torch_forward( self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, ): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated MLP's linear projection input_states = apply_mask_to_padding_states(input_states, attention_mask) projected_states = self.in_proj(input_states) gate, hidden_states_B_C, dt = projected_states.split( [self.intermediate_size, self.conv_dim, self.num_heads], dim=-1 ) use_precomputed_states = ( cache_params is not None and cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == cache_params.ssm_states[self.layer_idx].shape[0] == batch_size and cache_position is not None and cache_position[0] > 0 ) # 2. 
Convolution sequence transformation if use_precomputed_states: cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1) cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device) # We need to guarantee that anything regarding the cache is on the same device conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device) hidden_states_B_C = torch.sum( conv_states * self.conv1d.weight.squeeze(1), dim=-1 ) if self.use_conv_bias: hidden_states_B_C = hidden_states_B_C + self.conv1d.bias hidden_states_B_C = self.act(hidden_states_B_C) else: # Init cache if cache_params is not None: hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2) conv_states = nn.functional.pad( hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0) ) cache_params.conv_states[self.layer_idx].copy_(conv_states) hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)) hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask) hidden_states, B, C = torch.split( hidden_states_B_C, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1 ) # 3. SSM transformation A = -torch.exp(self.A_log.float()) # [num_heads] if use_precomputed_states: # We need to guarantee that anything regarding the cache is on the same device cache_device = cache_params.ssm_states[self.layer_idx].device # Note: there is no need to pad parameter matrices here, as there is just one new token # for batched generation dt = dt[:, 0, :][:, None, ...] dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim) # [num_heads] -> [num_heads, head_dim] dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim) dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype)) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32) # [bsz, num_heads, head_dim, state_size] dA = (torch.exp(dt[..., None] * A)).to(device=cache_device) # Discretize B # [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] -> # -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size] B = B.reshape(batch_size, self.n_groups, -1)[..., None, :] B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous() B = B.reshape(batch_size, -1, B.shape[-1]) # [bsz, num_heads, head_dim, state_size] dB = dt[..., None] * B[..., None, :] # Discretize x into dB # [bsz, intermediate_size] -> [bsz, num_heads, head_dim] hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim) dBx = (dB * hidden_states[..., None]).to(device=cache_device) # State calculation cache_params.ssm_states[self.layer_idx].copy_( cache_params.ssm_states[self.layer_idx] * dA + dBx ) # Subsequent output # [bsz, n_groups * state_size] -> [bsz, num_heads, state_size] C = C.reshape(batch_size, self.n_groups, -1)[..., None, :] C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous() C = C.reshape(batch_size, -1, C.shape[-1]) # [bsz, num_heads, head_dim] ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n] # Reshape ssm_states to merge the first two dimensions ssm_states_reshaped = 
ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n] C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1] y = torch.bmm(ssm_states_reshaped, C_reshaped) y = y.view(batch_size, self.num_heads, self.head_dim) # D skip connection # [num_heads] -> [num_heads, head_dim] D = self.D[..., None].expand(self.D.shape[0], self.head_dim) y = (y + hidden_states * D).to(y.dtype) # [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size] y = y.reshape(batch_size, -1)[:, None, ...] else: # begin ssd naive implementation without einsums dt = nn.functional.softplus(dt + self.dt_bias) dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1]) hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float() B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float() B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads) pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size) # Discretize x and A hidden_states = hidden_states * dt[..., None] A = A.to(hidden_states.dtype) * dt # Rearrange into blocks/chunks hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)] # [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size] A = A.permute(0, 3, 1, 2) A_cumsum = torch.cumsum(A, dim=-1) # 1. Compute the output for each intra-chunk (diagonal blocks) # This is the analog of a causal mask L = torch.exp(segment_sum(A)) # Contraction of C and B to get G (attention-weights like) G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n) G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h) # Compute M, equivalent to applying attention mask to weights M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None] M = M_intermediate.sum(dim=-1) # Compute Y_diag (apply to values) Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3) # 2. Compute the state for each intra-chunk # (right term of low-rank factorization of off-diagonal blocks; B terms) decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum) B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None] states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2) # 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries # (middle term of factorization of off-diag blocks; A terms) if use_precomputed_states: previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device) else: previous_states = torch.zeros_like(states[:, :1]) states = torch.cat([previous_states, states], dim=1) decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0)))) decay_chunk = decay_chunk.transpose(1, 3) new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1) states, ssm_state = new_states[:, :-1], new_states[:, -1] # 4. 
Compute state -> output conversion per chunk # (left term of low-rank factorization of off-diagonal blocks; C terms) state_decay_out = torch.exp(A_cumsum) C_times_states = (C[..., None, :] * states[:, :, None, ...]) state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1) Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None]) # Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks) y = Y_diag + Y_off # [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim] y = y.reshape(batch_size, -1, self.num_heads, self.head_dim) y = y + D_residual # Cutting off padded chunks if pad_size > 0: y = y[:, :seq_len, :, :] y = y.reshape(batch_size, seq_len, -1) # Init cache if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) scan_output = self.norm(y, gate) # end ssd naive # 4. Final linear projection contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size] return contextualized_states # fmt: on def forward( self, hidden_states, cache_params: Optional[HybridMambaAttentionDynamicCache] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, seq_idx: Optional[torch.IntTensor] = None, **kwargs, ): if is_fast_path_available and "cuda" in self.in_proj.weight.device.type: return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask, seq_idx) if seq_idx is not None: raise NotImplementedError( "`seq_idx` support requires fast path support. Please install `mamba_ssm` and `causal_conv1d`" ) dtype = hidden_states.dtype if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1: # tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66 hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype) return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask) class GraniteMoeHybridRMSNormGated(torch.nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states, gate=None): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) if gate is not None: hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32)) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class GraniteMoeHybridMLP(nn.Module): """ MLP layer for shared experts Args: config: Configuration object with model hyperparameters. 
""" def __init__(self, config: GraniteMoeHybridConfig): super().__init__() self.input_size = config.hidden_size self.hidden_size = config.shared_intermediate_size self.activation = ACT2FN[config.hidden_act] self.input_linear = nn.Linear(self.input_size, self.hidden_size * 2, bias=False) self.output_linear = nn.Linear(self.hidden_size, self.input_size, bias=False) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.input_linear(hidden_states) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] hidden_states = self.output_linear(hidden_states) return hidden_states class GraniteFlashAttentionKwargs(TypedDict, total=False): """ Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage. Use cases include padding-free training and fewer `torch.compile` graph breaks. Attributes: cu_seq_lens_q (`torch.LongTensor`) Gets cumulative sequence length for query state. cu_seq_lens_k (`torch.LongTensor`) Gets cumulative sequence length for key state. max_length_q (`int`): Maximum sequence length for query state. max_length_k (`int`): Maximum sequence length for key state. seq_idx (`torch.IntTensor): Index of each packed sequence. """ cu_seq_lens_q: torch.LongTensor cu_seq_lens_k: torch.LongTensor max_length_q: int max_length_k: int seq_idx: torch.IntTensor class GraniteMoeHybridRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ GraniteMoeHybridRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class GraniteMoeHybridParallelExperts(nn.Module): def __init__(self, num_experts: int, input_size: int, output_size: int) -> None: """ Initialize the GraniteMoeHybridParallelExperts module. The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and [ScatterMoE](https://github.com/shawntan/scattermoe), as well as the [MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py) used in vllm. Args: num_experts (int): Number of experts. input_size (int): Size of the input. output_size (int): Size of the output. """ super().__init__() self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size)) self.num_experts = num_experts self.input_size = input_size self.output_size = output_size def forward(self, inputs, expert_size): """ Forward pass of the GraniteMoeHybridParallelExperts module. Args: inputs (Tensor): Input tensor. expert_size: Expert size information. Returns: Tensor: Output tensor. """ input_list = inputs.split(expert_size, dim=0) output_list = [] for i in range(self.num_experts): output_list.append(F.linear(input_list[i], self.weight[i])) results = torch.cat(output_list, dim=0) return results class GraniteMoeHybridTopKGating(nn.Module): def __init__(self, input_size: int, num_experts: int, top_k: int): """ Initialize the top-k gating mechanism. 
Args: input_size (`int`): Size of the input. num_experts (`int`): Number of experts. top_k (`int`): Number of top experts to select. """ super().__init__() self.num_experts = num_experts self.input_size = input_size self.top_k = top_k self.layer = nn.Linear(input_size, num_experts, bias=False) def forward(self, hidden_states): # compute the top_k routing decision logits = self.layer(hidden_states).float() # [batch_size x seq_len, num_experts] top_k_logits, top_k_indices = logits.topk(self.top_k, dim=1) # [num_tokens, top_k] top_k_gates = torch.softmax(top_k_logits, dim=1).type_as(hidden_states) # [num_tokens, top_k] # compute number of input given to each expert zeros = torch.zeros( [top_k_gates.size(0), self.num_experts], dtype=top_k_gates.dtype, device=top_k_gates.device ) # [num_tokens, num_experts] gates = zeros.scatter(1, top_k_indices, 1) # [num_tokens, num_experts] expert_size = gates.long().sum(0) # [num_experts,] # (This cause torch.compile to fail with `torch._dynamo.exc.Unsupported: Backend compiler failed with a fake tensor exception at`) # (and `DataDependentOutputException`) expert_size = expert_size.tolist() # sort and group input tokens according to expert assignment top_k_experts = top_k_indices.flatten() # [num_tokens * top_k] _, index_sorted_experts = top_k_experts.sort(0) # [num_tokens * top_k] batch_index = index_sorted_experts.div(self.top_k, rounding_mode="trunc") # [num_tokens * top_k] # gather the gate values for grouped input tokens top_k_gates = top_k_gates.flatten() # [num_tokens * top_k] batch_gates = top_k_gates[index_sorted_experts] # [num_tokens * top_k] return index_sorted_experts, batch_index, batch_gates, expert_size, logits class GraniteMoeHybridMoE(nn.Module): """ A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts. Args: config: Configuration object with model hyperparameters. """ def __init__(self, config: GraniteMoeHybridConfig): super().__init__() self.input_size = config.hidden_size self.hidden_size = config.intermediate_size self.activation = ACT2FN[config.hidden_act] self.input_linear = GraniteMoeHybridParallelExperts( config.num_local_experts, self.input_size, self.hidden_size * 2 ) self.output_linear = GraniteMoeHybridParallelExperts( config.num_local_experts, self.hidden_size, self.input_size ) self.router = GraniteMoeHybridTopKGating( input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok, ) def forward(self, layer_input): """ Forward pass of the mixture of experts layer. Args: layer_input (Tensor): Input tensor. Returns: Tensor: Output tensor. Tensor: Router logits. 
""" bsz, length, emb_size = layer_input.size() layer_input = layer_input.reshape(-1, emb_size) _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input) expert_inputs = layer_input[batch_index] hidden_states = self.input_linear(expert_inputs, expert_size) chunked_hidden_states = hidden_states.chunk(2, dim=-1) hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1] expert_outputs = self.output_linear(hidden_states, expert_size) expert_outputs = expert_outputs * batch_gates[:, None] zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device) layer_output = zeros.index_add(0, batch_index, expert_outputs) layer_output = layer_output.view(bsz, length, self.input_size) return layer_output, router_logits class GraniteMoeHybridDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: GraniteMoeHybridConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size # Either attention or mamba will be initialized, depending on the layer type. self.self_attn = None if config.num_local_experts > 0: self.block_sparse_moe = GraniteMoeHybridMoE(config) self.input_layernorm = GraniteMoeHybridRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = GraniteMoeHybridRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.residual_multiplier = config.residual_multiplier self.shared_mlp = GraniteMoeHybridMLP(config) self.mamba = None if config.layers_block_type[layer_idx] == "mamba": self.mamba = GraniteMoeHybridMambaLayer(config, layer_idx) else: self.self_attn = GraniteMoeHybridAttention(config, layer_idx) self.layer_type = config.layers_block_type[layer_idx] # Accept 0 experts: skip MoE if num_local_experts == 0 self.has_experts = getattr(config, "num_local_experts", 0) > 0 @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, output_router_logits: Optional[bool] = False, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[GraniteFlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. 
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. kwargs (`dict`, *optional*): Arbitrary kwargs.Can be used to provide `GraniteFlashAttentionKwargs` for padding-free training and/or improve torch.compile performance. """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) if self.mamba is not None: hidden_states = self.mamba( hidden_states=hidden_states, cache_position=cache_position, cache_params=past_key_values, attention_mask=attention_mask, **kwargs, ) # No attention weights for state space layers self_attn_weights = None else: hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states * self.residual_multiplier # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) if self.has_experts: moe_hidden_states, router_logits = self.block_sparse_moe(hidden_states) hidden_states = moe_hidden_states + self.shared_mlp(hidden_states) else: hidden_states = self.shared_mlp(hidden_states) router_logits = None hidden_states = residual + hidden_states * self.residual_multiplier outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if output_router_logits: outputs += (router_logits,) return outputs @auto_docstring class GraniteMoeHybridPreTrainedModel(PreTrainedModel): config: GraniteMoeHybridConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["GraniteMoeHybridDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported) _is_stateful = True def _init_weights(self, module): super()._init_weights(module) if isinstance(module, GraniteMoeHybridParallelExperts): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, GraniteMoeHybridMambaLayer): module.dt_bias.data.fill_(1.0) module.A_log.data = torch.log(torch.arange(1, module.num_heads + 1)) module.D.data.fill_(1.0) elif isinstance(module, GraniteMoeHybridRMSNormGated): module.weight.data.fill_(1.0) class GraniteMoeHybridRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: GraniteMoeHybridConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) @auto_docstring class GraniteMoeHybridModel(GraniteMoeHybridPreTrainedModel): def __init__(self, config: GraniteMoeHybridConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [GraniteMoeHybridDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = GraniteMoeHybridRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.embedding_multiplier = config.embedding_multiplier self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.position_embedding_type = config.position_embedding_type self.rotary_emb = GraniteMoeHybridRotaryEmbedding(config) if self.position_embedding_type == "rope" else None # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[GraniteFlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds * self.embedding_multiplier ## overwritten because `HybridMambaAttentionDynamicCache` is needed if use_cache and past_key_values is None: logger.warning_once( "GraniteMoeHybrid requires an initialized `HybridMambaAttentionDynamicCache` to return a cache. " "Because one was not provided, no cache will be returned." 
) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) mamba_mask = self._update_mamba_mask(attention_mask, cache_position) # embed positions hidden_states = inputs_embeds position_embeddings = None # create position embeddings to be shared across the decoder layers if self.rotary_emb is not None: position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_logits = () if output_router_logits else None for decoder_layer in self.layers: # Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention) layer_mask = mamba_mask if decoder_layer.layer_type == "mamba" else causal_mask if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=layer_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, output_router_logits=output_router_logits, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: if layer_outputs[1] is not None: # append attentions only of attention layers. Mamba layers return `None` as the attention weights all_self_attns += (layer_outputs[1],) if output_router_logits: if layer_outputs[-1] is not None: # append router logits only of expert layers. Regular MLP layers return `None` as the router logits all_router_logits += (layer_outputs[-1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if past_key_values and not past_key_values.has_previous_state: past_key_values.has_previous_state = True return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, router_logits=all_router_logits, ) def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
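        # The code below either skips the mask entirely (the flash/flex/SDPA fast paths handled above) or
        # materializes a 4D additive causal mask in which positions that may be attended to hold 0 and masked
        # positions hold the dtype's minimum value.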
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask def _update_mamba_mask(self, attention_mask, cache_position): """ No need for zeroing states when 1. Cached forward 2. Attending to all inputs """ mamba_mask = attention_mask if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)): mamba_mask = None return mamba_mask def load_balancing_loss_func( gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None], num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, int]: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: gate_logits: Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of shape [batch_size X sequence_length, num_experts]. num_experts: Number of experts top_k: The number of experts to route per-token, can be also interpreted as the `top-k` routing parameter. attention_mask (`torch.Tensor`, *optional*): The attention_mask used in forward function shape [batch_size X sequence_length] if not None. Returns: The auxiliary loss. 
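    Example (a hedged sketch with toy shapes, not taken from the original file):

    ```python
    >>> gate_logits = tuple(torch.randn(8, 4) for _ in range(2))  # 2 layers, 8 tokens, 4 experts
    >>> aux_loss = load_balancing_loss_func(gate_logits, num_experts=4, top_k=2)
    ```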
""" if gate_logits is None or not isinstance(gate_logits, tuple): return 0 if isinstance(gate_logits, tuple): compute_device = gate_logits[0].device concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) if attention_mask is None: # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.mean(expert_mask.float(), dim=0) # Compute the average probability of routing to these experts router_prob_per_expert = torch.mean(routing_weights, dim=0) else: batch_size, sequence_length = attention_mask.shape num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask expert_attention_mask = ( attention_mask[None, :, :, None, None] .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) .reshape(-1, top_k, num_experts) .to(compute_device) ) # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( expert_attention_mask, dim=0 ) # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert router_per_expert_attention_mask = ( attention_mask[None, :, :, None] .expand((num_hidden_layers, batch_size, sequence_length, routing_weights.shape[1])) .reshape(-1, routing_weights.shape[1]) .to(compute_device) ) # Compute the average probability of routing to these experts router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( router_per_expert_attention_mask, dim=0 ) device_index = routing_weights.device.index if routing_weights.device.index is not None else 0 rank = routing_weights.shape[1] * int(device_index) overall_loss = torch.sum( tokens_per_expert[:, rank : rank + routing_weights.shape[1]] * router_prob_per_expert.unsqueeze(0) ) return overall_loss * num_experts class GraniteMoeHybridForCausalLM(GraniteMoeHybridPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: GraniteMoeHybridConfig): super().__init__(config) self.model = GraniteMoeHybridModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.router_aux_loss_coef = config.router_aux_loss_coef self.num_experts = config.num_local_experts self.num_experts_per_tok = config.num_experts_per_tok # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple, MoeCausalLMOutputWithPast]: 
r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, GraniteMoeHybridForCausalLM >>> model = GraniteMoeHybridForCausalLM.from_pretrained("ibm/PowerMoE-3b") >>> tokenizer = AutoTokenizer.from_pretrained("ibm/PowerMoE-3b") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, **kwargs, ) # Only compute necessary logits hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits / self.config.logits_scaling loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() # Flatten the tokens loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) aux_loss = None if output_router_logits: aux_loss = load_balancing_loss_func( outputs.router_logits if return_dict else outputs[-1], self.num_experts, self.num_experts_per_tok, attention_mask, ) if labels is not None: loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device if not return_dict: output = (logits,) + outputs[1:] if output_router_logits: output = (aux_loss,) + output return (loss,) + output if loss is not None else output return MoeCausalLMOutputWithPast( loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs, ): # Overwritten -- has a unique cache type, `HybridMambaAttentionDynamicCache` empty_past_kv = past_key_values is None # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when 
passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case. # (we can't check exception 3 while compiling) if not empty_past_kv: if ( inputs_embeds is not None # Exception 1 or cache_position[-1] >= input_ids.shape[1] # Exception 3 ): input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] elif use_cache: past_key_values = HybridMambaAttentionDynamicCache( self.config, input_ids.shape[0], self.dtype, device=self.device ) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if not empty_past_kv: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and empty_past_kv: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "attention_mask": attention_mask, "cache_position": cache_position, } ) return model_inputs __all__ = ["GraniteMoeHybridForCausalLM", "GraniteMoeHybridModel", "GraniteMoeHybridPreTrainedModel"]
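# A minimal, hedged decoding sketch appended for illustration (not part of the upstream file): it shows how the
# hybrid cache defined above is reused across a prefill call and a single greedy decode step. The checkpoint id is
# a placeholder assumption; substitute a real GraniteMoeHybrid checkpoint before running.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    checkpoint = "<granitemoehybrid-checkpoint>"  # placeholder, not a documented model id
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = GraniteMoeHybridForCausalLM.from_pretrained(checkpoint).eval()

    inputs = tokenizer("The capital of France is", return_tensors="pt")
    cache = HybridMambaAttentionDynamicCache(model.config, batch_size=1, dtype=model.dtype, device=model.device)

    with torch.no_grad():
        # Prefill: the whole prompt fills the attention key/value cache and the mamba conv/ssm states.
        out = model(**inputs, past_key_values=cache, use_cache=True)
        next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)
        # Decode one step: only the new token is fed; the cache carries the prompt context forward.
        out = model(next_token, past_key_values=out.past_key_values, use_cache=True)

    print(tokenizer.decode(next_token[0]))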
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert GroupViT checkpoints from the original repository. URL: https://github.com/NVlabs/GroupViT """ import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def rename_key(name): # vision encoder if "img_encoder.pos_embed" in name: name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings") if "img_encoder.patch_embed.proj" in name: name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection") if "img_encoder.patch_embed.norm" in name: name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm") if "img_encoder.layers" in name: name = name.replace("img_encoder.layers", "vision_model.encoder.stages") if "blocks" in name and "res" not in name: name = name.replace("blocks", "layers") if "attn" in name and "pre_assign" not in name: name = name.replace("attn", "self_attn") if "proj" in name and "self_attn" in name and "text" not in name: name = name.replace("proj", "out_proj") if "pre_assign_attn.attn.proj" in name: name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj") if "norm1" in name: name = name.replace("norm1", "layer_norm1") if "norm2" in name and "pre_assign" not in name: name = name.replace("norm2", "layer_norm2") if "img_encoder.norm" in name: name = name.replace("img_encoder.norm", "vision_model.layernorm") # text encoder if "text_encoder.token_embedding" in name: name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding") if "text_encoder.positional_embedding" in name: name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight") if "text_encoder.transformer.resblocks." in name: name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.") if "ln_1" in name: name = name.replace("ln_1", "layer_norm1") if "ln_2" in name: name = name.replace("ln_2", "layer_norm2") if "c_fc" in name: name = name.replace("c_fc", "fc1") if "c_proj" in name: name = name.replace("c_proj", "fc2") if "text_encoder" in name: name = name.replace("text_encoder", "text_model") if "ln_final" in name: name = name.replace("ln_final", "final_layer_norm") # projection layers if "img_projector.linear_hidden." in name: name = name.replace("img_projector.linear_hidden.", "visual_projection.") if "img_projector.linear_out." 
in name: name = name.replace("img_projector.linear_out.", "visual_projection.3.") if "text_projector.linear_hidden" in name: name = name.replace("text_projector.linear_hidden", "text_projection") if "text_projector.linear_out" in name: name = name.replace("text_projector.linear_out", "text_projection.3") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors key_split = key.split(".") stage_num, layer_num = int(key_split[2]), int(key_split[4]) dim = config.vision_config.hidden_size if "weight" in key: orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight" ] = val[:dim, :] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight" ] = val[dim : dim * 2, :] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight" ] = val[-dim:, :] else: orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias" ] = val[:dim] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias" ] = val[dim : dim * 2] orig_state_dict[ f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias" ] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors key_split = key.split(".") layer_num = int(key_split[3]) dim = config.text_config.hidden_size if "weight" in key: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ dim : dim * 2, : ] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] else: orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2] orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] else: new_name = rename_key(key) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): orig_state_dict[new_name] = val.squeeze_() else: orig_state_dict[new_name] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_groupvit_checkpoint( checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False ): """ Copy/paste/tweak model's weights to the Transformers design. 
""" config = GroupViTConfig() model = GroupViTModel(config).eval() state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"] new_state_dict = convert_state_dict(state_dict, config) missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0) # verify result processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") image = prepare_img() inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) if model_name == "groupvit-gcc-yfcc": expected_logits = torch.tensor([[13.3523, 6.3629]]) elif model_name == "groupvit-gcc-redcaps": expected_logits = torch.tensor([[16.1873, 8.6230]]) else: raise ValueError(f"Model name {model_name} not supported.") assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3) processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) print("Successfully saved processor and model to", pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") processor.push_to_hub(model_name, organization="nielsr") model.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model." ) parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint") parser.add_argument( "--model_name", default="groupvit-gccy-fcc", type=str, help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.", ) args = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/groupvit/convert_groupvit_nvlab_to_hf.py", "repo_id": "transformers", "token_count": 4256 }
515
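Once the GroupViT conversion script above has saved the model and processor, the checkpoint can be loaded back from the dump folder and used for zero-shot image/text matching, mirroring the verification block in the script. This is a hedged usage sketch: `./groupvit-gcc-yfcc` is a placeholder for whatever `--pytorch_dump_folder_path` was actually used.

```python
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTModel

# Placeholder path: point this at the folder passed as --pytorch_dump_folder_path.
model = GroupViTModel.from_pretrained("./groupvit-gcc-yfcc").eval()
processor = CLIPProcessor.from_pretrained("./groupvit-gcc-yfcc")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image holds one score per text prompt; softmax turns them into probabilities.
print(outputs.logits_per_image.softmax(dim=-1))
```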
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Hiera checkpoints from the original repository. URL: https://github.com/facebookresearch/hiera """ import argparse import json import math import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, HieraConfig, HieraForImageClassification, HieraForPreTraining, HieraModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config: HieraConfig, base_model: bool, mae_model: bool): rename_keys = [] # fmt: off num_stages = len(config.depths) # embedding dimensions for input and stages dims = [config.embed_dim] + [int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(num_stages)] global_layer_idx = 0 for stage_idx in range(num_stages): dim_in = dims[stage_idx] dim_out = dims[stage_idx + 1] for layer_idx in range(config.depths[stage_idx]): rename_keys.append((f"blocks.{global_layer_idx}.norm1.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.weight")) rename_keys.append((f"blocks.{global_layer_idx}.norm1.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_before.bias")) rename_keys.append((f"blocks.{global_layer_idx}.attn.qkv.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.weight")) rename_keys.append((f"blocks.{global_layer_idx}.attn.qkv.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.qkv.bias")) rename_keys.append((f"blocks.{global_layer_idx}.attn.proj.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.weight")) rename_keys.append((f"blocks.{global_layer_idx}.attn.proj.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.attn.proj.bias")) rename_keys.append((f"blocks.{global_layer_idx}.norm2.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.weight")) rename_keys.append((f"blocks.{global_layer_idx}.norm2.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.layernorm_after.bias")) rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc1.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.weight")) rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc1.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc1.bias")) rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc2.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.weight")) rename_keys.append((f"blocks.{global_layer_idx}.mlp.fc2.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.mlp.fc2.bias")) # projection layer only for the first layer of each stage boundary (except the first stage) if dim_out != dim_in and layer_idx == 0: 
rename_keys.append((f"blocks.{global_layer_idx}.proj.weight", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.weight")) rename_keys.append((f"blocks.{global_layer_idx}.proj.bias", f"hiera.encoder.stages.{stage_idx}.layers.{layer_idx}.proj.bias")) global_layer_idx += 1 # projection layer + position embeddings rename_keys.extend( [ ("patch_embed.proj.weight", "hiera.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "hiera.embeddings.patch_embeddings.projection.bias") ] ) rename_keys.append(("pos_embed", "hiera.embeddings.position_embeddings")) if base_model: # layernorm + pooler rename_keys.extend([("norm.weight", "pooler.layernorm.weight"), ("norm.bias", "pooler.layernorm.bias")]) # if just the base model, we should remove "hiera" from all keys that start with "hiera" rename_keys = [(pair[0], pair[1][6:]) if pair[1].startswith("hiera") else pair for pair in rename_keys] elif mae_model: rename_keys.extend( [ ("encoder_norm.weight", "encoder_norm.weight"), ("encoder_norm.bias", "encoder_norm.bias"), ("mask_token", "decoder.mask_token"), ("decoder_pos_embed", "decoder.decoder_position_embeddings"), ("decoder_norm.weight", "decoder.decoder_norm.weight"), ("decoder_norm.bias", "decoder.decoder_norm.bias"), ("decoder_pred.weight", "decoder.decoder_pred.weight"), ("decoder_pred.bias", "decoder.decoder_pred.bias"), ("decoder_embed.weight", "decoder.decoder_embeddings.weight"), ("decoder_embed.bias", "decoder.decoder_embeddings.bias") ] ) for i in range(config.decoder_depth): rename_keys.extend( [ (f"decoder_blocks.{i}.norm1.weight", f"decoder.decoder_block.layers.{i}.layernorm_before.weight"), (f"decoder_blocks.{i}.norm1.bias", f"decoder.decoder_block.layers.{i}.layernorm_before.bias"), (f"decoder_blocks.{i}.attn.qkv.weight", f"decoder.decoder_block.layers.{i}.attn.qkv.weight"), (f"decoder_blocks.{i}.attn.qkv.bias", f"decoder.decoder_block.layers.{i}.attn.qkv.bias"), (f"decoder_blocks.{i}.attn.proj.weight", f"decoder.decoder_block.layers.{i}.attn.proj.weight"), (f"decoder_blocks.{i}.attn.proj.bias", f"decoder.decoder_block.layers.{i}.attn.proj.bias"), (f"decoder_blocks.{i}.norm2.weight", f"decoder.decoder_block.layers.{i}.layernorm_after.weight"), (f"decoder_blocks.{i}.norm2.bias", f"decoder.decoder_block.layers.{i}.layernorm_after.bias"), (f"decoder_blocks.{i}.mlp.fc1.weight", f"decoder.decoder_block.layers.{i}.mlp.fc1.weight"), (f"decoder_blocks.{i}.mlp.fc1.bias", f"decoder.decoder_block.layers.{i}.mlp.fc1.bias"), (f"decoder_blocks.{i}.mlp.fc2.weight", f"decoder.decoder_block.layers.{i}.mlp.fc2.weight"), (f"decoder_blocks.{i}.mlp.fc2.bias", f"decoder.decoder_block.layers.{i}.mlp.fc2.bias"), ] ) for i in range(config.num_query_pool): rename_keys.extend( [ (f"multi_scale_fusion_heads.{i}.weight", f"multiscale_fusion.multi_scale_fusion_heads.{i}.weight"), (f"multi_scale_fusion_heads.{i}.bias", f"multiscale_fusion.multi_scale_fusion_heads.{i}.bias") ] ) else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "hiera.pooler.layernorm.weight"), ("norm.bias", "hiera.pooler.layernorm.bias"), ("head.projection.weight", "classifier.weight"), ("head.projection.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def remove_classification_head_(state_dict): ignore_keys = ["head.projection.weight", "head.projection.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = 
"http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im def get_labels_for_classifier(model_name: str) -> tuple[dict[int, str], dict[str, int], int]: repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) return id2label, label2id, num_labels def get_hiera_config(model_name: str, base_model: bool, mae_model: bool) -> HieraConfig: if model_name == "hiera-tiny-224": config = HieraConfig(depths=[1, 2, 7, 2]) elif model_name == "hiera-small-224": config = HieraConfig(depths=[1, 2, 11, 2]) elif model_name == "hiera-base-224": config = HieraConfig() elif model_name == "hiera-base-plus-224": config = HieraConfig(embed_dim=112, num_heads=[2, 4, 8, 16]) elif model_name == "hiera-large-224": config = HieraConfig(embed_dim=144, num_heads=[2, 4, 8, 16], depths=[2, 6, 36, 4]) elif model_name == "hiera-huge-224": config = HieraConfig(embed_dim=256, num_heads=[4, 8, 16, 32], depths=[2, 6, 36, 4]) else: raise ValueError(f"Unrecognized model name: {model_name}") if base_model: pass elif mae_model: config.num_query_pool = 2 config.decoder_hidden_size = 512 config.decoder_depth = 8 config.decoder_num_heads = 16 # Table 3b from Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles config.mask_ratio = 0.6 else: id2label, label2id, num_labels = get_labels_for_classifier(model_name) config.id2label = id2label config.label2id = label2id config.num_labels = num_labels return config @torch.no_grad() def convert_hiera_checkpoint(args): model_name = args.model_name base_model = args.base_model pytorch_dump_folder_path = args.pytorch_dump_folder_path push_to_hub = args.push_to_hub mae_model = args.mae_model config = get_hiera_config(model_name, base_model, mae_model) # Load original hiera model original_model_name = model_name.replace("-", "_") original_model_name = f"mae_{original_model_name}" if mae_model else original_model_name original_checkpoint_name = "mae_in1k_ft_in1k" if not (base_model or mae_model) else "mae_in1k" original_model = torch.hub.load( "facebookresearch/hiera", model=original_model_name, pretrained=True, checkpoint=original_checkpoint_name, ) original_model.eval() original_state_dict = original_model.state_dict() # Don't need to remove head for MAE because original implementation doesn't have it on MAE if base_model: remove_classification_head_(original_state_dict) # # Rename keys new_state_dict = original_state_dict.copy() rename_keys = create_rename_keys(config, base_model, mae_model) for src, dest in rename_keys: rename_key(new_state_dict, src, dest) # Load HF hiera model if base_model: model = HieraModel(config) elif mae_model: model = HieraForPreTraining(config) else: model = HieraForImageClassification(config) model.eval() missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) print("Missing keys:", missing_keys) print("Unexpected keys:", unexpected_keys) input_image = prepare_img() original_image_preprocessor = transforms.Compose( [ transforms.Resize(int((256 / 224) * 224), interpolation=transforms.functional.InterpolationMode.BICUBIC), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD), ] ) image_processor = BitImageProcessor( image_mean=IMAGENET_DEFAULT_MEAN, 
image_std=IMAGENET_DEFAULT_STD, size={"shortest_edge": 256} ) inputs = image_processor(images=input_image, return_tensors="pt") expected_pixel_values = original_image_preprocessor(input_image).unsqueeze(0) input_image = prepare_img() inputs = image_processor(images=input_image, return_tensors="pt") expected_pixel_values = original_image_preprocessor(input_image).unsqueeze(0) assert torch.allclose(inputs.pixel_values, expected_pixel_values, atol=1e-4) print("Pixel values look good!") print(f"{inputs.pixel_values[0, :3, :3, :3]=}") # If is MAE we pass a noise to generate a random mask mask_spatial_shape = [ i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size) ] num_windows = math.prod(mask_spatial_shape) torch.manual_seed(2) noise = torch.rand(1, num_windows) outputs = model(**inputs) if not mae_model else model(noise=noise, **inputs) # original implementation returns logits.softmax(dim=-1) if base_model: expected_prob, expected_intermediates = original_model(expected_pixel_values, return_intermediates=True) expected_last_hidden = expected_intermediates[-1] batch_size, _, _, hidden_dim = expected_last_hidden.shape expected_last_hidden = expected_last_hidden.reshape(batch_size, -1, hidden_dim) assert torch.allclose(outputs.last_hidden_state, expected_last_hidden, atol=1e-3) print("Base Model looks good as hidden states match original implementation!") print(f"{outputs.last_hidden_state[0, :3, :3]=}") elif mae_model: # get mask from noise to be able to compare outputs mask, _ = model.hiera.embeddings.patch_embeddings.random_masking(expected_pixel_values, noise) expected_loss, _, _, _ = original_model(expected_pixel_values, mask=mask.bool()) assert torch.allclose(outputs.loss, expected_loss, atol=1e-3) print("MAE Model looks good as loss matches original implementation!") else: expected_prob = original_model(expected_pixel_values) assert torch.allclose(outputs.logits.softmax(dim=-1), expected_prob, atol=1e-3) print("Classifier looks good as probs match original implementation") print(f"{outputs.logits[:, :5]=}") if pytorch_dump_folder_path is not None: print(f"Saving model and processor for {model_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: hub_name = model_name if base_model: hub_name = model_name elif mae_model: hub_name = f"{model_name}-mae" else: hub_name = f"{model_name}-in1k" repo_id = f"EduardoPacheco/{hub_name}" print(f"Pushing model and processor for {model_name} to hub at {repo_id}") model.push_to_hub(repo_id) image_processor.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model-name", default="hiera-tiny-224", type=str, choices=[ "hiera-tiny-224", "hiera-small-224", "hiera-base-224", "hiera-base-plus-224", "hiera-large-224", "hiera-huge-224", ], help="Name of the Hiera model you'd like to convert.", ) parser.add_argument( "--pytorch-dump-folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--verify-logits", action="store_true", help="Whether or not to verify the logits against the original implementation.", ) parser.add_argument( "--push-to-hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." 
) parser.add_argument( "--base-model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.add_argument( "--mae-model", action="store_true", help="Whether to convert the MAE checkpoint to HieraForPreTraining." ) args = parser.parse_args() convert_hiera_checkpoint(args)
transformers/src/transformers/models/hiera/convert_hiera_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/hiera/convert_hiera_to_hf.py", "repo_id": "transformers", "token_count": 7299 }
516
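For the MAE variant, the Hiera conversion script above draws a noise tensor of shape `(1, num_windows)` so that the converted model and the original mask the same windows. A small sketch of the window-count arithmetic, assuming the default `HieraConfig` values (224x224 input, patch stride 4, masked unit size 8); check the config class if these differ in your version.

```python
import math

# Assumed HieraConfig defaults; these are stated assumptions, not read from a config object here.
image_size = (224, 224)
patch_stride = (4, 4)
masked_unit_size = (8, 8)

# Same arithmetic as the MAE branch of the conversion script above.
mask_spatial_shape = [i // s // ms for i, s, ms in zip(image_size, patch_stride, masked_unit_size)]
num_windows = math.prod(mask_spatial_shape)
print(mask_spatial_shape, num_windows)  # [7, 7] 49 -> one noise entry per maskable window
```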
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_hunyuan_v1_moe.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional, Union import torch import torch.nn.functional as F from torch import nn from transformers.cache_utils import Cache from ...activations import ACT2FN from ...cache_utils import DynamicCache from ...generation import GenerationMixin from ...integrations import use_kernel_forward_from_hub from ...masking_utils import create_causal_mask from ...modeling_layers import GenericForSequenceClassification, GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from .configuration_hunyuan_v1_moe import HunYuanMoEV1Config @use_kernel_forward_from_hub("RMSNorm") class HunYuanMoEV1RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ HunYuanMoEV1RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class HunYuanMoEV1MLP(nn.Module): def __init__(self, config: HunYuanMoEV1Config, layer_idx=None, is_shared_mlp=False): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] self.layer_idx = layer_idx def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 
2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class HunYuanMoEV1Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: HunYuanMoEV1Config, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, 
config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.query_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps) self.key_layernorm = HunYuanMoEV1RMSNorm(self.head_dim, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) query_states = self.query_layernorm(query_states) key_states = self.key_layernorm(key_states) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class HunYuanMoEV1Gate(nn.Module): def __init__(self, config: HunYuanMoEV1Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx num_experts = config.num_experts if isinstance(config.num_experts, int) else config.num_experts[layer_idx] self.wg = nn.Linear(config.hidden_size, num_experts, bias=False, dtype=torch.float32) def forward(self, hidden_states): bsz, seq_len, hidden_size = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_size) if self.wg.weight.dtype == torch.float32: hidden_states = hidden_states.float() logits = self.wg(hidden_states) return logits class HunYuanMoEV1Moe(nn.Module): def __init__(self, config: HunYuanMoEV1Config, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.num_experts = config.num_experts if isinstance(config.num_experts, int) else config.num_experts[layer_idx] self.top_k = config.moe_topk if isinstance(config.moe_topk, int) else config.moe_topk[layer_idx] self.gate = HunYuanMoEV1Gate(config, layer_idx=layer_idx) # self.wg = nn.Linear(config.hidden_size, config.num_experts, bias=False, dtype=torch.float32) self.experts = nn.ModuleList( [HunYuanMoEV1MLP(config, layer_idx=layer_idx, is_shared_mlp=False) for _ in range(self.num_experts)] ) self.shared_mlp = 
HunYuanMoEV1MLP(config, layer_idx=layer_idx, is_shared_mlp=True) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states_mlp = self.shared_mlp(hidden_states) router_logits = self.gate(hidden_states) hidden_states = hidden_states.view(-1, hidden_dim) # router_logits: (batch * sequence_length, n_experts) routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float) routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) routing_weights /= routing_weights.sum(dim=-1, keepdim=True) # we cast back to the input dtype routing_weights = routing_weights.to(hidden_states.dtype) final_hidden_states = torch.zeros( (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device ) # One hot encode the selected experts to create an expert mask # this will be used to easily index which expert is going to be solicited expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) # Loop over all available experts in the model and perform the computation on each expert expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero() for expert_idx in expert_hit: expert_layer = self.experts[expert_idx] idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0)) # Index the correct hidden states and compute the expert hidden state for # the current expert. We need to make sure to multiply the output hidden # states by `routing_weights` on the corresponding tokens (top-1 and top-2) current_state = hidden_states[None, top_x].reshape(-1, hidden_dim) current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None] # However `index_add_` only supports torch tensors for indexing so we'll use # the `top_x` tensor here. 
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype)) final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim) return final_hidden_states + hidden_states_mlp class HunYuanMoEV1DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: HunYuanMoEV1Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = HunYuanMoEV1Attention(config=config, layer_idx=layer_idx) self.mlp = HunYuanMoEV1Moe(config, layer_idx=layer_idx) self.input_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.layer_idx = layer_idx @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring class HunYuanMoEV1PreTrainedModel(PreTrainedModel): config: HunYuanMoEV1Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["HunYuanMoEV1DecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _supports_attention_backend = True _can_record_outputs = { "hidden_states": HunYuanMoEV1DecoderLayer, "attentions": HunYuanMoEV1Attention, } def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() class HunYuanMoEV1RotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: HunYuanMoEV1Config, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] if self.rope_type == "dynamic" and config.rope_scaling["alpha"]: # DynamicNTKAlphaRotary self.dim = config.head_dim base = config.rope_theta * config.rope_scaling.get("alpha") ** (self.dim / 
(self.dim - 2)) inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.attention_scaling = 1.0 else: inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) @auto_docstring class HunYuanMoEV1Model(HunYuanMoEV1PreTrainedModel): def __init__(self, config: HunYuanMoEV1Config): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [HunYuanMoEV1DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = HunYuanMoEV1RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = HunYuanMoEV1RotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_position: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring class 
HunYuanMoEV1ForCausalLM(HunYuanMoEV1PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = HunYuanMoEV1Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" Example: ```python >>> from transformers import AutoTokenizer, HunYuanMoEV1ForCausalLM >>> model = HunYuanMoEV1ForCausalLM.from_pretrained("meta-hunyuan_v1_moe/HunYuanMoEV1-2-7b-hf") >>> tokenizer = AutoTokenizer.from_pretrained("meta-hunyuan_v1_moe/HunYuanMoEV1-2-7b-hf") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class HunYuanMoEV1ForSequenceClassification(GenericForSequenceClassification, HunYuanMoEV1PreTrainedModel): pass __all__ = [ "HunYuanMoEV1ForCausalLM", "HunYuanMoEV1Model", "HunYuanMoEV1PreTrainedModel", "HunYuanMoEV1ForSequenceClassification", ]
transformers/src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py/0
{ "file_path": "transformers/src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py", "repo_id": "transformers", "token_count": 11480 }
517
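`HunYuanMoEV1Moe.forward` above is a standard top-k token router: softmax over the router logits, keep the top-k experts per token, renormalize the kept weights, then accumulate each expert's output back into the token positions it served. Below is a compact, dependency-free sketch of the same routing idea with toy sizes; the per-expert loop and `Linear` "experts" are illustrative, not the model's actual modules.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
num_tokens, hidden, num_experts, top_k = 6, 4, 8, 2
x = torch.randn(num_tokens, hidden)
router_logits = torch.randn(num_tokens, num_experts)

# softmax -> top-k -> renormalize, as in the MoE block above
weights = F.softmax(router_logits, dim=-1)
weights, chosen = torch.topk(weights, top_k, dim=-1)
weights = weights / weights.sum(dim=-1, keepdim=True)

# Toy "experts": one linear map each, applied only to the tokens routed to it.
experts = [torch.nn.Linear(hidden, hidden) for _ in range(num_experts)]
out = torch.zeros_like(x)
for e in range(num_experts):
    token_idx, slot = torch.where(chosen == e)  # tokens that picked expert e, and in which top-k slot
    if token_idx.numel():
        out[token_idx] += weights[token_idx, slot, None] * experts[e](x[token_idx])

print(out.shape)  # torch.Size([6, 4]): each token is a weighted sum of its top-2 expert outputs
```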
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert IJEPA checkpoints from the original repository. URL: https://github.com/facebookresearch/ijepa """ import argparse import gc import re from pathlib import Path from typing import Optional import requests import torch from PIL import Image from transformers import ( IJepaConfig, IJepaModel, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # Projection layer + position embeddings r"pos_embed": r"embeddings.position_embeddings", r"patch_embed.proj.weight": r"embeddings.patch_embeddings.projection.weight", r"patch_embed.proj.bias": r"embeddings.patch_embeddings.projection.bias", # Encoder layers: Layernorms, Attention, Feedforward layers r"blocks.(\d+).norm1.weight": r"encoder.layer.\1.layernorm_before.weight", r"blocks.(\d+).norm1.bias": r"encoder.layer.\1.layernorm_before.bias", r"blocks.(\d+).attn.proj.weight": r"encoder.layer.\1.attention.output.dense.weight", r"blocks.(\d+).attn.proj.bias": r"encoder.layer.\1.attention.output.dense.bias", r"blocks.(\d+).norm2.weight": r"encoder.layer.\1.layernorm_after.weight", r"blocks.(\d+).norm2.bias": r"encoder.layer.\1.layernorm_after.bias", r"blocks.(\d+).mlp.fc1.weight": r"encoder.layer.\1.intermediate.dense.weight", r"blocks.(\d+).mlp.fc1.bias": r"encoder.layer.\1.intermediate.dense.bias", r"blocks.(\d+).mlp.fc2.weight": r"encoder.layer.\1.output.dense.weight", r"blocks.(\d+).mlp.fc2.bias": r"encoder.layer.\1.output.dense.bias", # Layernorm + pooler r"norm.weight": r"layernorm.weight", r"norm.bias": r"layernorm.bias", } # fmt: on def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict] = None): """ Converts old keys to new keys using the mapping and dynamically removes the 'ijepa.' prefix if necessary. Args: state_dict_keys (dict): The keys from the state_dict to convert. Returns: dict: A mapping from old keys to new keys. 
""" output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text # Apply regex-based mapping for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # Skip the key continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :] state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im def get_ijepa_config(model_name): patch_size = int(model_name.split("_")[1][4:]) config = IJepaConfig(patch_size=patch_size) if "vith" in model_name: config.hidden_size = 1280 config.num_hidden_layers = 32 config.num_attention_heads = 16 config.layer_norm_eps = 1e-6 config.mlp_ratio = 4 config.intermediate_size = 5120 if model_name == "ijepa_vith16_1k": config.image_size = 448 elif "vitg" in model_name: config.hidden_size = 1408 config.num_hidden_layers = 40 config.num_attention_heads = 16 config.layer_norm_eps = 1e-6 config.mlp_ratio = 48 / 11 config.intermediate_size = 6144 else: raise ValueError("Model not supported, only supports huge and giant models.") return config @torch.no_grad() def write_model(model_name, output_dir, safe_serialization, push_to_hub, verify_logits): """ Copy/paste/tweak model's weights to our IJEPA structure. 
""" # define default IJEPA configuration config = get_ijepa_config(model_name) checkpoint_mapping = { "ijepa_vith14_1k": "https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar", "ijepa_vith14_22k": "https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar", "ijepa_vith16_1k": "https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar", "ijepa_vitg16_22k": "https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar", } # Load original checkpoint checkpoint_url = checkpoint_mapping[model_name] original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["encoder"] original_state_dict = {k.replace("module.", ""): v for k, v in original_state_dict.items()} # Rename keys state_dict = original_state_dict.copy() new_keys = convert_old_keys_to_new_keys(state_dict.keys()) for old_key, new_key in new_keys.items(): rename_key(state_dict, old_key, new_key) read_in_q_k_v(state_dict, config) # load HuggingFace model model = IJepaModel(config, add_pooling_layer=False).eval() model.load_state_dict(state_dict) size = {"height": config.image_size, "width": config.image_size} image_processor = ViTImageProcessor(size=size) if verify_logits: # Check outputs on an image, prepared by ViTImageProcessor encoding = image_processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] with torch.no_grad(): outputs = model(pixel_values) expected_slices = { "ijepa_vith14_1k": torch.Tensor( [[-0.0621, -0.0054, -2.7513], [-0.1952, 0.0909, -3.9536], [0.0942, -0.0331, -1.2833]] ), "ijepa_vith14_22k": torch.Tensor( [[0.0358, -0.0045, -0.2154], [0.0418, -0.0246, 0.0108], [0.2529, -0.0345, -0.0246]] ), "ijepa_vith16_1k": torch.Tensor( [[0.5145, -0.1259, 0.0615], [0.1132, 0.0028, -0.0496], [1.1586, -0.0056, -0.0387]] ), "ijepa_vitg16_22k": torch.Tensor( [[0.0512, -0.0510, -0.0649], [0.1972, 0.0380, -0.0790], [0.1667, -0.0834, -0.1240]] ), } assert torch.allclose( expected_slices[model_name], outputs.last_hidden_state[0, :3, :3], atol=1e-4, ) if output_dir: Path(output_dir).mkdir(exist_ok=True) print(f"Saving model {model_name} to {output_dir}") image_processor.save_pretrained(output_dir, safe_serialization=safe_serialization) model.save_pretrained(output_dir, safe_serialization=safe_serialization) if push_to_hub: image_processor.push_to_hub(repo_id=f"jmtzt/{model_name}", safe_serialization=safe_serialization) model.push_to_hub(repo_id=f"jmtzt/{model_name}", safe_serialization=safe_serialization) if output_dir: del model, state_dict gc.collect() print("Reloading the model to check if it's saved correctly.") IJepaModel.from_pretrained(output_dir, device_map="auto") print("Model reloaded successfully.") def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="ijepa_vith14_1k", type=str, choices=[ "ijepa_vith14_1k", "ijepa_vith14_22k", "ijepa_vith16_1k", "ijepa_vitg16_22k", ], help="Name of the model you'd like to convert.", ) parser.add_argument( "--output_dir", default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the model to the 🤗 Hub.", ) parser.add_argument( "--verify_logits", action="store_false", help="Whether or not to verify logits after conversion." 
) parser.set_defaults() args = parser.parse_args() write_model(args.model_name, args.output_dir, args.safe_serialization, args.push_to_hub, args.verify_logits) if __name__ == "__main__": main()
transformers/src/transformers/models/ijepa/convert_ijepa_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/ijepa/convert_ijepa_to_hf.py", "repo_id": "transformers", "token_count": 4738 }
518
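`convert_old_keys_to_new_keys` in the IJEPA converter above applies its regex mapping to all checkpoint keys at once by joining them with newlines and running `re.sub` per pattern. A reduced demo of that trick using two entries taken from the mapping above; the input key names are toy examples.

```python
import re

# Two entries copied from ORIGINAL_TO_CONVERTED_KEY_MAPPING above (subset only).
mapping = {
    r"blocks.(\d+).norm1.weight": r"encoder.layer.\1.layernorm_before.weight",
    r"patch_embed.proj.bias": r"embeddings.patch_embeddings.projection.bias",
}

old_keys = ["blocks.3.norm1.weight", "patch_embed.proj.bias"]
text = "\n".join(old_keys)
for pattern, replacement in mapping.items():
    text = re.sub(pattern, replacement, text)

print(dict(zip(old_keys, text.split("\n"))))
# {'blocks.3.norm1.weight': 'encoder.layer.3.layernorm_before.weight',
#  'patch_embed.proj.bias': 'embeddings.patch_embeddings.projection.bias'}
```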
# coding=utf-8 # Copyright 2023 The Salesforce Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch InstructBLIP model.""" import math from dataclasses import dataclass from typing import Any, Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Class defining the outputs of [`InstructBlipForConditionalGeneration`]. """ ) # Copied from transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGenerationModelOutput with Blip2->InstructBlip class InstructBlipForConditionalGenerationModelOutput(ModelOutput): r""" loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Language modeling loss from the language model. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head of the language model. vision_outputs (`BaseModelOutputWithPooling`): Outputs of the vision encoder. qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): Outputs of the Q-Former (Querying Transformer). language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): Outputs of the language model. 
""" loss: Optional[tuple[torch.FloatTensor]] = None logits: Optional[tuple[torch.FloatTensor]] = None vision_outputs: Optional[torch.FloatTensor] = None qformer_outputs: Optional[tuple[torch.FloatTensor]] = None language_model_outputs: Optional[tuple[torch.FloatTensor]] = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->InstructBlip class InstructBlipVisionEmbeddings(nn.Module): def __init__(self, config: InstructBlipVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings # Adapted from 
transformers.models.siglip.modeling_siglip.eager_attention_forward -> InstructBLIP doesn't cast attn weights to fp32 def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.blip_2.modeling_blip_2.Blip2Attention with Blip2->InstructBlip class InstructBlipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.is_causal = False self.attention_dropout = config.attention_dropout # small tweak here compared to CLIP, no bias here self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False) if config.qkv_bias: q_bias = nn.Parameter(torch.zeros(self.embed_dim)) v_bias = nn.Parameter(torch.zeros(self.embed_dim)) else: q_bias = None v_bias = None if q_bias is not None: qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias)) self.qkv.bias = nn.Parameter(qkv_bias) self.projection = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute( 2, 0, 3, 1, 4 ) query_states, key_states, value_states = mixed_qkv[0], mixed_qkv[1], mixed_qkv[2] attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask=None, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scale, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.projection(attn_output) outputs = (attn_output, attn_weights) if output_attentions else (attn_output, None) return outputs # Copied from transformers.models.blip.modeling_blip.BlipMLP class InstructBlipMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.blip.modeling_blip.BlipEncoderLayer with Blip->InstructBlip class InstructBlipEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: InstructBlipConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = InstructBlipAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = InstructBlipMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, head_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + residual residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs @auto_docstring class InstructBlipPreTrainedModel(PreTrainedModel): config: InstructBlipConfig base_model_prefix = "blip" supports_gradient_checkpointing = True _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True _no_split_modules = [ "InstructBlipQFormerEmbeddings", "InstructBlipAttention", "InstructBlipQFormerMultiHeadAttention", "InstructBlipQFormerSelfOutput", ] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=factor) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, InstructBlipVisionEmbeddings): nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor) nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor) elif isinstance(module, (InstructBlipForConditionalGeneration, InstructBlipModel)): module.query_tokens.data.zero_() # Copied from transformers.models.blip.modeling_blip.BlipEncoder with Blip->InstructBlip class InstructBlipEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`InstructBlipEncoderLayer`]. Args: config (`InstructBlipConfig`): The corresponding vision configuration for the `InstructBlipEncoder`. """ def __init__(self, config: InstructBlipConfig): super().__init__() self.config = config self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP class InstructBlipVisionModel(InstructBlipPreTrainedModel): main_input_name = "pixel_values" config: InstructBlipVisionConfig def __init__(self, config: InstructBlipVisionConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.embeddings = InstructBlipVisionEmbeddings(config) self.encoder = InstructBlipEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def get_input_embeddings(self): return self.embeddings class InstructBlipQFormerMultiHeadAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention heads (%d)" % (config.hidden_size, config.num_attention_heads) ) 
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size) self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            seq_length = hidden_states.size()[1]
            position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores_dtype = attention_scores.dtype

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the model's forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer class InstructBlipQFormerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerAttention with Blip2->InstructBlip class InstructBlipQFormerAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention) self.output = InstructBlipQFormerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: self_outputs = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->InstructBlipQFormer class InstructBlipQFormerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: 
torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->InstructBlipQFormer class InstructBlipQFormerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class InstructBlipQFormerLayer(GradientCheckpointingLayer): def __init__(self, config, layer_idx): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = InstructBlipQFormerAttention(config) self.layer_idx = layer_idx if layer_idx % config.cross_attention_frequency == 0: self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = InstructBlipQFormerIntermediate(config) self.output = InstructBlipQFormerOutput(config) self.intermediate_query = InstructBlipQFormerIntermediate(config) self.output_query = InstructBlipQFormerOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, query_length=0, ): self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: if encoder_hidden_states is None: raise ValueError("encoder_hidden_states must be given for cross-attention layers") cross_attention_outputs = self.crossattention( query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) query_attention_output = cross_attention_outputs[0] # add cross attentions if we output attention weights outputs = outputs + cross_attention_outputs[1:] layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output, ) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :], ).to(layer_output.device) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, 
attention_output) return layer_output # Copied from transformers.models.blip_2.modeling_blip_2.Blip2QFormerEncoder with Blip2->InstructBlip class InstructBlipQFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList( [InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, query_length=query_length, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if query_length > 0 and layer_module.has_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class InstructBlipQFormerEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.config = config def forward( self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0, ): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids.to(embeddings.device)) embeddings = embeddings + position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = embeddings.to(self.layernorm.weight.dtype) embeddings = self.layernorm(embeddings) embeddings = 
self.dropout(embeddings) return embeddings class InstructBlipQFormerModel(InstructBlipPreTrainedModel): """ Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the instruction as input. """ _supports_attention_backend = False # adds position on attn weights before last matmul _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False def __init__(self, config: InstructBlipQFormerConfig): super().__init__(config) self.config = config self.embeddings = InstructBlipQFormerEmbeddings(config) self.encoder = InstructBlipQFormerEncoder(config) self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool = False, ) -> torch.Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (`tuple[int]`): The shape of the input to the model. device: (`torch.device`): The device of the input to the model. Returns: `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})", ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, query_embeds: Optional[torch.Tensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Cache` of length `config.n_layers` with each tuple having 4 tensors of: shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None and query_embeds is None: raise ValueError("You have to specify query_embeds when input_ids is None") query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds, ) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if isinstance(encoder_hidden_states, list): encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() else: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if isinstance(encoder_attention_mask, list): encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length, ) sequence_output = encoder_outputs[0] pooled_output = sequence_output[:, 0, :] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" InstructBLIP base Model consisting of language model, qformer and vision encoder. 
""" ) class InstructBlipModel(InstructBlipPreTrainedModel): main_input_name = "pixel_values" _keep_in_fp32_modules = ["query_tokens"] # TODO @ArthurZucker I don't know why this is required for FP8 def __init__(self, config: InstructBlipConfig): super().__init__(config) self.vision_model = InstructBlipVisionModel(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = InstructBlipQFormerModel(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) self.language_model = AutoModel.from_config(config.text_config) if self.language_model._no_split_modules is not None: self._no_split_modules.extend(self.language_model._no_split_modules) if self.language_model._keep_in_fp32_modules is not None: self._keep_in_fp32_modules.extend(self.language_model._keep_in_fp32_modules) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared def _preprocess_accelerate(self): r""" Some pre-processing hacks to make the model `accelerate` compatible. Check https://github.com/huggingface/transformers/pull/21707 for more details. """ hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`. logger.warning( "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." " Please pass a `device_map` that contains `language_model` to remove this warning." " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for" " more details on creating a `device_map` for large models.", ) if hasattr(self.language_model, "_hf_hook"): self.language_model._hf_hook.io_same_device = True # For `generate` compatibility def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: bool = False, use_cache: Optional[bool] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]: r""" qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided to serve as text prompt, which the Q-Former model will encode. Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for details. [What are input IDs?](../glossary#input-ids) qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be provided to serve as text prompt, which the language model can continue. Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for details. [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # step 1: forward the images through the vision encoder, # to get image embeddings of shape (batch_size, seq_len, hidden_size) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding, ) image_embeds = vision_outputs[0] # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device) if qformer_attention_mask is None: qformer_attention_mask = torch.ones_like(qformer_input_ids) qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1) query_outputs = self.qformer( input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) query_output = query_outputs[0][:, : query_tokens.size(1), :] if inputs_embeds is None: inputs_embeds = self.language_model.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) # step 3: use the language model, conditioned on the query outputs and the prompt language_model_inputs = self.language_projection(query_output) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) return InstructBlipForConditionalGenerationModelOutput( vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @auto_docstring( custom_intro=""" InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision encoder, Querying Transformer (Q-Former) and a language model. One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token. 
""" ) class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel, GenerationMixin): config: InstructBlipConfig main_input_name = "pixel_values" _can_compile_fullgraph = True _keep_in_fp32_modules = ["query_tokens"] # TODO @ArthurZucker I don't know why this is required for FP8 def __init__(self, config: InstructBlipConfig): super().__init__(config) self.vision_model = InstructBlipVisionModel._from_config(config.vision_config) self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) self.qformer = InstructBlipQFormerModel._from_config(config.qformer_config) self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size) if config.use_decoder_only_language_model: language_model = AutoModelForCausalLM.from_config(config.text_config) else: language_model = AutoModelForSeq2SeqLM.from_config(config.text_config) if language_model._no_split_modules is not None: self._no_split_modules.extend(language_model._no_split_modules) if language_model._keep_in_fp32_modules is not None: self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules) self.language_model = language_model # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_output_embeddings(self, new_embeddings): self.language_model.set_output_embeddings(new_embeddings) def get_output_embeddings(self) -> nn.Module: return self.language_model.get_output_embeddings() def get_encoder(self): return self.language_model.get_encoder() def get_decoder(self): return self.language_model.get_decoder() # Copied from transformers.models.instructblip.modeling_instructblip.InstructBlipModel._tie_weights def _tie_weights(self): if not self.config.use_decoder_only_language_model: self.language_model.encoder.embed_tokens = self.language_model.shared self.language_model.decoder.embed_tokens = self.language_model.shared # Copied from transformers.models.instructblip.modeling_instructblip.InstructBlipModel._preprocess_accelerate def _preprocess_accelerate(self): r""" Some pre-processing hacks to make the model `accelerate` compatible. Check https://github.com/huggingface/transformers/pull/21707 for more details. """ hf_device_map = self.hf_device_map if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: # warn users about unexpected behavior when using multi-GPU + InstructBLIP + `accelerate`. logger.warning( "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." " Please pass a `device_map` that contains `language_model` to remove this warning." " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for" " more details on creating a `device_map` for large models.", ) if hasattr(self.language_model, "_hf_hook"): self.language_model._hf_hook.io_same_device = True # For `generate` compatibility def get_image_features( self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor] = None, interpolate_pos_encoding: Optional[bool] = False, return_dict: Optional[bool] = False, ): """ Encodes images into continuous embeddings that can be forwarded to the language model. 
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
        """
        # step 1: forward the images through the vision encoder,
        # to get image embeddings of shape (batch_size, seq_len, hidden_size)
        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )
        image_embeds = vision_outputs[0]

        # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)

        # difference with BLIP-2 here: we also feed the instruction prompt to the Q-Former
        query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
        query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
        if qformer_attention_mask is None:
            qformer_attention_mask = torch.ones_like(qformer_input_ids)
        qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
        query_outputs = self.qformer(
            input_ids=qformer_input_ids,
            attention_mask=qformer_attention_mask,
            query_embeds=query_tokens,
            encoder_hidden_states=image_embeds,
            encoder_attention_mask=image_attention_mask,
            return_dict=True,
        )
        query_output = query_outputs[0][:, : query_tokens.size(1), :]

        # step 3: use the language model, conditioned on the query outputs and the prompt
        language_model_inputs = self.language_projection(query_output)
        if return_dict:
            return language_model_inputs, vision_outputs, query_outputs
        return language_model_inputs

    def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        return special_image_mask

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        qformer_input_ids: torch.FloatTensor,
        qformer_attention_mask: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        return_dict: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        use_cache: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]:
        r"""
        qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be
            provided to serve as text prompt, which the Q-Former model will encode.

            Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
            details.
[What are input IDs?](../glossary#input-ids) qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of input sequence tokens in the vocabulary of the language model. Input tokens can optionally be provided to serve as text prompt, which the language model can continue. Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for details. [What are input IDs?](../glossary#input-ids) decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. Only relevant in case an encoder-decoder language model (like T5) is used. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration >>> import torch >>> from PIL import Image >>> import requests >>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b") >>> device = "cuda" if torch.cuda.is_available() else "cpu" >>> model.to(device) # doctest: +IGNORE_RESULT >>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> prompt = "What is unusual about this image?" >>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device) >>> outputs = model.generate( ... **inputs, ... do_sample=False, ... num_beams=5, ... max_length=256, ... min_length=1, ... top_p=0.9, ... repetition_penalty=1.5, ... length_penalty=1.0, ... temperature=1, ... ) >>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() >>> print(generated_text) The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation. 
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) if self.config.use_decoder_only_language_model: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs, ) logits = outputs.logits if return_dict else outputs[0] loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) else: outputs = self.language_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, use_cache=use_cache, **kwargs, ) loss = outputs.loss if return_dict else outputs[0] logits = outputs.logits if return_dict else outputs[1] return InstructBlipForConditionalGenerationModelOutput( loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs, ) @torch.no_grad() def generate( self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor] = None, qformer_attention_mask: Optional[torch.LongTensor] = None, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, **generate_kwargs, ) -> torch.LongTensor: """ Overrides `generate` function to be able to use the model as a conditional generator. Args: pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): Input images to be processed. qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt to be fed to the Q-Former module. qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): The sequence used as a prompt for the generation. attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): Mask to avoid performing attention on padding token indices. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate the positional encoding of the image embeddings. 
Returns: `torch.LongTensor`: The generated token ids, of shape `(batch_size, generated_sequence_length)`; decode them with the processor to obtain the generated text. """ if hasattr(self, "hf_device_map"): # preprocess for `accelerate` self._preprocess_accelerate() batch_size = pixel_values.shape[0] language_model_inputs, vision_outputs, query_outputs = self.get_image_features( pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True, ) if inputs_embeds is None: if input_ids is None: image_tokens = [self.config.image_token_index] * self.config.num_query_tokens start_tokens = image_tokens + [self.config.text_config.bos_token_id] input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device) input_ids = input_ids.repeat(batch_size, 1) inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs) inputs = {"inputs_embeds": inputs_embeds, "attention_mask": attention_mask} if not self.language_model.config.is_encoder_decoder: inputs["input_ids"] = input_ids outputs = self.language_model.generate(**inputs, **generate_kwargs) return outputs __all__ = [ "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ]
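The piece doing the multimodal fusion in both `forward` and `generate` above is the placeholder scatter: the projected Q-Former outputs (`language_model_inputs`) are written into the embedding positions occupied by image placeholder tokens before the language model runs. Below is a minimal, self-contained sketch of that mechanism; the tensor names mirror the code above, but the sizes and the placeholder token id are made up for illustration.

```python
import torch

# Toy sizes, purely illustrative -- the real model uses config.num_query_tokens
# query embeddings and the language model's hidden size.
batch_size, seq_len, hidden_size, num_query_tokens = 1, 6, 4, 2
image_token_id = 99  # stands in for config.image_token_index (hypothetical value)

# A prompt whose first two positions are image placeholder tokens.
input_ids = torch.tensor([[image_token_id, image_token_id, 5, 8, 13, 2]])
inputs_embeds = torch.randn(batch_size, seq_len, hidden_size)

# Projected Q-Former output -- this is what `language_model_inputs` holds above.
language_model_inputs = torch.randn(batch_size, num_query_tokens, hidden_size)

# Boolean mask over placeholder positions, expanded to the hidden dimension,
# mirroring what `get_placeholder_mask` produces.
special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)

# Write the image-derived embeddings into the placeholder slots.
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)

print(inputs_embeds.shape)  # torch.Size([1, 6, 4])
assert torch.equal(inputs_embeds[0, :2], language_model_inputs[0])
```

In the real model the mask comes from `get_placeholder_mask` and the projected features from `get_image_features`; the toy version only shows how `masked_scatter` routes one into the other.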
transformers/src/transformers/models/instructblip/modeling_instructblip.py/0
{ "file_path": "transformers/src/transformers/models/instructblip/modeling_instructblip.py", "repo_id": "transformers", "token_count": 32973 }
519
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Video processor class for InternVL.""" from typing import Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, SizeDict, ) from ...processing_utils import Unpack, VideosKwargs from ...utils import ( TensorType, is_torch_available, is_torchvision_available, is_torchvision_v2_available, is_vision_available, ) from ...utils.import_utils import requires from ...video_processing_utils import BaseVideoProcessor from ...video_utils import VideoMetadata, group_videos_by_shape, reorder_videos if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F if is_torch_available(): import torch if is_vision_available(): from ...image_utils import PILImageResampling class InternVLVideoProcessorInitKwargs(VideosKwargs): initial_shift: Union[bool, float, int] @requires(backends=("torchvision",)) class InternVLVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} do_resize = True do_rescale = True do_normalize = True do_convert_rgb = True initial_shift = True do_sample_frames = False # Set to False for BC, recommended to set `True` in new models valid_kwargs = InternVLVideoProcessorInitKwargs model_input_names = ["pixel_values_videos"] def __init__(self, **kwargs: Unpack[InternVLVideoProcessorInitKwargs]): super().__init__(**kwargs) def sample_frames( self, video: "torch.Tensor", metadata: Optional[Union[VideoMetadata, dict]] = None, num_frames: Optional[int] = None, fps: Optional[Union[int, float]] = None, initial_shift: Optional[Union[bool, float, int]] = None, ): """ Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames. If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames` and `fps` are mutually exclusive. Args: video (`torch.Tensor`): Video that need to be sampled. metadata (`VideoMetadata`, *optional*): Metadata of the video containing information about total duration, fps and total number of frames. num_frames (`int`, *optional*): Maximum number of frames to sample. Defaults to `self.num_frames`. fps (`int` or `float`, *optional*): Target frames to sample per second. Defaults to `self.fps`. initial_shift (`bool`, `float` or `int`, defaults to `self.initial_shift`): The initial shift to apply when sampling frames. If `True`, the shift is set so that frames are sampled from the middle of the video. Returns: torch.Tensor: Sampled video frames. 
""" num_frames = num_frames if num_frames is not None else self.num_frames initial_shift = initial_shift if initial_shift is not None else self.initial_shift total_num_frames = video.shape[0] # If num_frames is not given but fps is, calculate num_frames from fps if num_frames is None and fps is not None: if metadata is None: raise ValueError( "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. " "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video" ) num_frames = int(total_num_frames / metadata["fps"] * fps) if initial_shift is True: initial_shift = total_num_frames / num_frames / 2 if num_frames > total_num_frames: raise ValueError( f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. " ) indices = torch.arange(initial_shift, total_num_frames, total_num_frames / num_frames).int() video = video[indices].contiguous() return video def _preprocess( self, videos: list["torch.Tensor"], video_metadata: Union[list[VideoMetadata], list[dict]], do_convert_rgb: bool, do_resize: bool, size: SizeDict, size_divisor: Optional[int], interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, do_pad: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_sample_frames: Optional[bool] = None, fps: Optional[Union[int, float]] = None, num_frames: Optional[int] = None, initial_shift: Optional[Union[bool, float, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, device: Optional["torch.Tensor"] = None, ) -> BatchFeature: if do_sample_frames: # Sample video frames videos = [ self.sample_frames(video, metadata, fps=fps, num_frames=num_frames, initial_shift=initial_shift) for video, metadata in zip(videos, video_metadata) ] # We need to sample frames first before moving to device, if `do_sample_frames=True`. 
# Otherwise moving the whole video incurs high GPU mem usage for long videos if device is not None: videos = [video.to(device) for video in videos] # Group videos by size for batched resizing grouped_videos, grouped_videos_index = group_videos_by_shape(videos) resized_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): if do_convert_rgb: stacked_videos = self.convert_to_rgb(stacked_videos) if do_resize: stacked_videos = self.resize( stacked_videos, size=size, size_divisor=size_divisor, interpolation=interpolation ) resized_videos_grouped[shape] = stacked_videos resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index) # Group videos by size for further processing # Needed in case do_resize is False, or resize returns videos with different sizes grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos) processed_videos_grouped = {} for shape, stacked_videos in grouped_videos.items(): if do_center_crop: stacked_videos = self.center_crop(stacked_videos, crop_size) # Fused rescale and normalize stacked_videos = self.rescale_and_normalize( stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_videos_grouped[shape] = stacked_videos processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index) processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors) __all__ = ["InternVLVideoProcessor"]
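To make the `initial_shift` behaviour of `sample_frames` concrete, here is a small standalone sketch of the index arithmetic it uses (the helper function is hypothetical; only the arithmetic is lifted from the method above).

```python
import torch

def sample_indices(total_num_frames: int, num_frames: int, initial_shift=True) -> torch.Tensor:
    """Sketch of the frame-index computation in `sample_frames` above."""
    if initial_shift is True:
        # Shift by half a sampling stride so frames come from the middle of each interval.
        initial_shift = total_num_frames / num_frames / 2
    return torch.arange(initial_shift, total_num_frames, total_num_frames / num_frames).int()

# A 32-frame clip sampled down to 8 frames:
print(sample_indices(32, 8).tolist())     # [2, 6, 10, 14, 18, 22, 26, 30]
print(sample_indices(32, 8, 0).tolist())  # [0, 4, 8, 12, 16, 20, 24, 28]
```

With `initial_shift=True` the stride is unchanged but the first index moves to the centre of the first sampling interval, so frames are taken from interval centres rather than interval starts.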
transformers/src/transformers/models/internvl/video_processing_internvl.py/0
{ "file_path": "transformers/src/transformers/models/internvl/video_processing_internvl.py", "repo_id": "transformers", "token_count": 3214 }
520
# coding=utf-8 # Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """KOSMOS-2 model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Kosmos2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2TextModel`]. It is used to instantiate a KOSMOS-2 text decoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text decoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65037): Vocabulary size of the Kosmos2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Kosmos2Model`]. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). embed_dim (`int`, *optional*, defaults to 2048): Dimensionality of the layers and the pooler layer. layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. ffn_dim (`int`, *optional*, defaults to 8192): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_embedding (`bool`, *optional*, defaults to `True`): Scale embeddings by diving by sqrt(embed_dim). 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). pad_token_id (`int`, *optional*, defaults to 1): Token id used for padding. bos_token_id (`int`, *optional*, defaults to 0): Token id used for beginning of string. eos_token_id (`int`, *optional*, defaults to 2): Token id used for end of string. ```""" model_type = "kosmos_2_text_model" base_config_key = "text_config" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "attention_heads", "hidden_size": "embed_dim", "num_hidden_layers": "layers", } def __init__( self, vocab_size=65037, max_position_embeddings=2048, embed_dim=2048, layers=24, ffn_dim=8192, attention_heads=32, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, layer_norm_eps=1e-5, init_std=0.02, scale_embedding=True, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.embed_dim = embed_dim self.layers = layers self.ffn_dim = ffn_dim self.attention_heads = attention_heads self.activation_function = activation_function self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.init_std = init_std self.scale_embedding = scale_embedding self.use_cache = use_cache class Kosmos2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). ```""" model_type = "kosmos_2_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=1024, intermediate_size=4096, num_hidden_layers=24, num_attention_heads=16, num_channels=3, image_size=224, patch_size=14, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.image_size = image_size self.initializer_range = initializer_range self.initializer_factor = initializer_factor self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act class Kosmos2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Kosmos2Model`]. It is used to instantiate a KOSMOS-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the KOSMOS-2 [microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Kosmos2VisionConfig`]. latent_query_num (`int`, *optional*, defaults to 64): The number of latent query tokens that represent the image features used in the text decoder component. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Kosmos2Config, Kosmos2Model >>> # Initializing a Kosmos-2 kosmos-2-patch14-224 style configuration >>> configuration = Kosmos2Config() >>> # Initializing a model (with random weights) from the kosmos-2-patch14-224 style configuration >>> model = Kosmos2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "kosmos-2" sub_configs = {"text_config": Kosmos2TextConfig, "vision_config": Kosmos2VisionConfig} def __init__( self, text_config=None, vision_config=None, latent_query_num=64, **kwargs, ): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `Kosmos2TextConfig` with default values.") if vision_config is None: vision_config = {} logger.info("`vision_config` is `None`. Initializing the `Kosmos2VisionConfig` with default values.") self.text_config = Kosmos2TextConfig(**text_config) self.vision_config = Kosmos2VisionConfig(**vision_config) self.latent_query_num = latent_query_num __all__ = ["Kosmos2Config"]
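As a small complement to the example in the `Kosmos2Config` docstring, the nested configs can also be filled in explicitly as dicts; anything left unspecified falls back to the defaults documented above, and the `attribute_map` on the text config exposes the standard `num_attention_heads`/`hidden_size`/`num_hidden_layers` names as aliases. A short sketch using only the documented defaults:

```python
from transformers import Kosmos2Config

# Sub-configs passed as plain dicts; unspecified fields keep their defaults.
config = Kosmos2Config(
    text_config={"embed_dim": 2048, "layers": 24, "attention_heads": 32},
    vision_config={"hidden_size": 1024, "num_hidden_layers": 24},
    latent_query_num=64,
)

print(config.text_config.attention_heads)      # 32
print(config.text_config.num_attention_heads)  # 32, via the `attribute_map` alias
print(config.vision_config.image_size)         # 224 (default)
```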
transformers/src/transformers/models/kosmos2/configuration_kosmos2.py/0
{ "file_path": "transformers/src/transformers/models/kosmos2/configuration_kosmos2.py", "repo_id": "transformers", "token_count": 4549 }
521
# coding=utf-8 # Copyright 2025 Kyutai and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import types from typing import Optional, Union import numpy as np import torch import torch.nn as nn from ...cache_utils import Cache from ...feature_extraction_utils import BatchFeature from ...generation import GenerationConfig, GenerationMixin from ...modeling_utils import PreTrainedModel from ...utils import PaddingStrategy, TensorType, logging from ..auto import AutoModel from ..encodec.feature_extraction_encodec import EncodecFeatureExtractor from ..llama.modeling_llama import LlamaForCausalLM from ..mimi.modeling_mimi import MimiConv1dPaddingCache from ..moshi.modeling_moshi import MoshiModel, MoshiPreTrainedModel logger = logging.get_logger(__name__) class KyutaiSpeechToTextFeatureExtractor(EncodecFeatureExtractor): r""" Constructs an KyutaiSpeechToText feature extractor. This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: feature_size (`int`, *optional*, defaults to 1): The feature dimension of the extracted features. Use 1 for mono, 2 for stereo. sampling_rate (`int`, *optional*, defaults to 24000): The sampling rate at which the audio waveform should be digitalized expressed in hertz (Hz). padding_value (`float`, *optional*, defaults to 0.0): The value that is used to fill the padding values. chunk_length_s (`float`, *optional*): If defined the audio is pre-processed into chunks of lengths `chunk_length_s` and then encoded. overlap (`float`, *optional*): Defines the overlap between each chunk. It is used to compute the `chunk_stride` using the following formulae : `int((1.0 - self.overlap) * self.chunk_length)`. audio_delay_seconds (`float`, *optional*, defaults to 0.0): The delay in seconds to add after the audio (right padding). audio_silence_prefix_seconds (`float`, *optional*, defaults to 0.0): The silence prefix in seconds to add before the audio (left padding). """ def __init__( self, audio_delay_seconds: Optional[float] = 0.0, audio_silence_prefix_seconds: Optional[float] = 0.0, **super_kwargs, ): super().__init__(**super_kwargs) self.audio_delay_seconds = audio_delay_seconds self.audio_silence_prefix_seconds = audio_silence_prefix_seconds def __call__( self, raw_audio: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature: """ Main method to featurize and prepare for the model one or several sequence(s). Args: raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be processed. 
Each sequence can be a numpy array, a list of float values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape `(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio (`feature_size = 2`). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, *optional*, defaults to `False`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass `sampling_rate` at the forward call to prevent silent errors. """ if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with" f" {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. 
Make sure you only set one.") elif padding is None: # by default let's pad the inputs padding = True is_batched = bool( isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))) ) if is_batched: raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio] elif not is_batched and not isinstance(raw_audio, np.ndarray): raw_audio = np.asarray(raw_audio, dtype=np.float32) elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64): raw_audio = raw_audio.astype(np.float32) # always return batch if not is_batched: raw_audio = [np.asarray(raw_audio).T] # verify inputs are valid for idx, example in enumerate(raw_audio): if example.ndim > 2: raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}") if self.feature_size == 1 and example.ndim != 1: raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels") if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels") padded_inputs = None input_values = BatchFeature({"input_values": raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: max_length = min(array.shape[0] for array in raw_audio) nb_step = int(np.floor(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: max_length = max(array.shape[0] for array in raw_audio) nb_step = int(np.ceil(max_length / self.chunk_stride)) max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length padding = "max_length" else: padded_inputs = input_values # normal padding on batch if padded_inputs is None: padded_inputs = self.pad( input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, ) if padding: padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask") # now let's padd left and right pad_left = int(self.audio_silence_prefix_seconds * self.sampling_rate) pad_right = int((self.audio_delay_seconds + 1.0) * self.sampling_rate) padded_inputs["input_values"] = np.pad( padded_inputs["input_values"], ((0, 0), (pad_left, pad_right)), mode="constant", constant_values=0.0, ) if padding: padded_inputs["padding_mask"] = np.pad( padded_inputs["padding_mask"], ((0, 0), (pad_left, pad_right)), mode="constant", constant_values=0, ) input_values = [] for example in padded_inputs.pop("input_values"): if self.feature_size == 1: example = example[..., None] input_values.append(example.T) padded_inputs["input_values"] = input_values if return_tensors is not None: padded_inputs = padded_inputs.convert_to_tensors(return_tensors) return padded_inputs class KyutaiSpeechToTextPreTrainedModel(MoshiPreTrainedModel): pass class KyutaiSpeechToTextConv1dPaddingCache(MimiConv1dPaddingCache): pass class KyutaiSpeechToTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.embed_tokens = nn.Embedding( config.vocab_size + (config.num_codebooks * config.codebook_vocab_size) + 1, config.hidden_size, padding_idx=config.audio_pad_token_id, ) audio_tokens_offsets = torch.arange(config.num_codebooks) * config.codebook_vocab_size audio_tokens_offsets += config.vocab_size audio_tokens_offsets = nn.functional.pad( audio_tokens_offsets, (1, 0) ) # pad one 0 to the left for the text token self.register_buffer("audio_tokens_offsets", audio_tokens_offsets, persistent=False) def forward(self, input_ids): input_ids = torch.where( input_ids 
== self.embed_tokens.padding_idx, input_ids, input_ids + self.audio_tokens_offsets ) inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = inputs_embeds.sum(dim=2) return inputs_embeds class KyutaiSpeechToTextModel(MoshiModel): def __init__(self, config): super().__init__(config) self.embed_tokens = KyutaiSpeechToTextEmbeddings(config) class KyutaiSpeechToTextForConditionalGeneration(LlamaForCausalLM, GenerationMixin, PreTrainedModel): _keep_in_fp32_modules_strict = ["codec_model"] def __init__(self, config): super().__init__(config) self.codec_model = AutoModel.from_config(config.codec_config) # we are in an edge case where for the codec_model self.can_generate is False, setting self.codec_model.generation_config to None # yet the codec_model needs a generation config to initalize it's cache for streaming inference # we therefore initialize a generation config for the codec model self.codec_model.generation_config = GenerationConfig.from_model_config(config.codec_config) def forward(self, **super_kwargs): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> import torch >>> from datasets import load_dataset, Audio >>> from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration >>> torch_device = "cuda" if torch.cuda.is_available() else "cpu" >>> model_id = "kyutai/stt-2.6b-en-trfs" >>> processor = KyutaiSpeechToTextProcessor.from_pretrained(model_id) >>> model = KyutaiSpeechToTextForConditionalGeneration.from_pretrained(model_id, device_map=torch_device) >>> ds = load_dataset( ... "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" ... ) >>> ds = ds.cast_column("audio", Audio(sampling_rate=24000)) >>> inputs = processor( ... ds[0]["audio"]["array"], ... 
) >>> inputs.to(torch_device) >>> output_tokens = model.generate(**inputs) >>> print(processor.batch_decode(output_tokens, skip_special_tokens=True)) ```""" super().forward(**super_kwargs) def _prepare_generation_config(self, *args, **kwargs): generation_config, model_kwargs = GenerationMixin._prepare_generation_config(*args, **kwargs) # this should be passed to the model kwargs for the input preparation model_kwargs["audio_window_size"] = ( generation_config.audio_window_size if hasattr(generation_config, "audio_window_size") else None ) return generation_config, model_kwargs def _prepare_model_inputs( self, inputs: Optional[torch.Tensor] = None, bos_token_id: Optional[torch.Tensor] = None, model_kwargs: Optional[dict[str, torch.Tensor]] = None, ) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]: inputs, input_name, model_kwargs = GenerationMixin._prepare_model_inputs( inputs=inputs, bos_token_id=bos_token_id, model_kwargs=model_kwargs, ) audio_window_size = model_kwargs.get("audio_window_size", None) if audio_window_size is None: audio_window_size = self.codec_model.get_encoded_length(model_kwargs["input_values"].shape[-1]).item() model_kwargs["audio_window_size"] = audio_window_size batch_size = inputs.shape[0] device = inputs.device # initialize audio tokens model_kwargs["audio_tokens"] = torch.zeros( (batch_size, audio_window_size, self.config.num_codebooks), device=device, dtype=torch.long, ) model_kwargs["current_window"] = ( torch.tensor([0, 0], device=device, dtype=torch.long).expand(batch_size, -1).contiguous() ) # let's use generate's cache preparation to prepare the cache for the codec model temporary_model_kwargs = {} # monkey patching the codec model with cache preparation methods since we don't want it to inherit fully from GenerationMixin # Add cache-related methods from GenerationMixin to codec model cache_methods = [ "_prepare_cache_for_generation", "_get_cache", ] for method in cache_methods: setattr(self.codec_model, method, types.MethodType(getattr(self, method).__func__, self.codec_model)) setattr( self.codec_model, "_supports_default_dynamic_cache", types.MethodType(lambda x: True, self.codec_model) ) self.codec_model.generation_config.cache_implementation = "dynamic" self.codec_model._prepare_cache_for_generation( generation_config=self.codec_model.generation_config, model_kwargs=temporary_model_kwargs, assistant_model=None, batch_size=batch_size, max_cache_length=self.config.codec_config.sliding_window, ) if "past_key_values" in temporary_model_kwargs: model_kwargs["encoder_past_key_values"] = temporary_model_kwargs["past_key_values"] # initialize the padding cache for the codec model per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], [] for layer_name in self.codec_model.encoder._mimiconv1d_layer_names: per_layer_padding.append(self.codec_model.encoder.get_submodule(layer_name).padding_total) per_layer_padding_mode.append(self.codec_model.encoder.get_submodule(layer_name).pad_mode) per_layer_in_channels.append(self.codec_model.encoder.get_submodule(layer_name).in_channels) # downsample layer per_layer_padding.append(self.codec_model.downsample.padding_total) per_layer_padding_mode.append(self.codec_model.downsample.pad_mode) per_layer_in_channels.append(self.codec_model.downsample.in_channels) model_kwargs["padding_cache"] = KyutaiSpeechToTextConv1dPaddingCache( num_layers=len(self.codec_model.encoder._mimiconv1d_layer_names) + 1, per_layer_padding=per_layer_padding, per_layer_padding_mode=per_layer_padding_mode, 
per_layer_in_channels=per_layer_in_channels, ) return inputs, input_name, model_kwargs def prepare_inputs_for_generation( self, *args, audio_tokens: Optional[torch.LongTensor] = None, input_values: Optional[torch.FloatTensor] = None, padding_mask: Optional[torch.Tensor] = None, audio_window_size: Optional[int] = None, current_window: Optional[tuple[int, int]] = None, encoder_past_key_values: Optional[Cache] = None, padding_cache: Optional[KyutaiSpeechToTextConv1dPaddingCache] = None, **kwargs, ): model_inputs = GenerationMixin.prepare_inputs_for_generation(*args, **kwargs) if input_values is not None: cache_position = model_inputs["cache_position"] start, end = current_window[0] # first cache position is for bos token, so we need to offset by -1 if cache_position[-1] - 1 >= end: # we need to encode the new audio tokens with torch.no_grad(): input_values_start_idx = start * self.config.frame_size input_values_end_idx = (start + audio_window_size) * self.config.frame_size current_input_values = input_values[..., input_values_start_idx:input_values_end_idx] codec_model_output = self.codec_model.encode( current_input_values, encoder_past_key_values=encoder_past_key_values, padding_cache=padding_cache, ) new_audio_tokens = codec_model_output.audio_codes.transpose(1, 2) audio_tokens.copy_(new_audio_tokens) start = end.clone() end = end + audio_window_size current_window.copy_( torch.tensor([start, end], device=current_window.device).expand(current_window.shape[0], -1) ) # first cache position is for bos token, so we need to offset by -1 current_audio_tokens_idxs = (cache_position - start - 1).clamp(min=0) current_audio_tokens = audio_tokens[:, current_audio_tokens_idxs, :] current_audio_tokens[:, cache_position == 0, :] = self.config.audio_bos_token_id input_ids = model_inputs.pop("input_ids") input_ids = torch.cat( [input_ids.unsqueeze(2), current_audio_tokens], dim=2, ) model_inputs["input_ids"] = input_ids return model_inputs # TODO: @eustlb, this should be standardized @classmethod def from_pretrained(cls, *args, **kwargs): if kwargs.get("output_loading_info", False): model, loading_info = PreTrainedModel.from_pretrained(*args, **kwargs) else: model = PreTrainedModel.from_pretrained(*args, **kwargs) # copy depth decoder generation conf attr to the depth decoder generation config prefix = "codec_" prefix_len = len(prefix) codec_model_attrs = { attr[prefix_len:]: value for attr, value in vars(model.generation_config).items() if attr.startswith(prefix) } vars(model.codec_model.generation_config).update({"_from_model_config": False, **codec_model_attrs}) # remove the depth decoder generation conf attr from the model generation config for attr in codec_model_attrs: delattr(model.generation_config, prefix + attr) if "output_loading_info" in kwargs: return model, loading_info else: return model # TODO: @eustlb, this should be standardized def save_pretrained(self, *args, **kwargs): prefix = "codec_" codec_model_attrs = self.codec_model.generation_config.to_diff_dict() codec_model_attrs.pop("transformers_version", None) for attr, value in codec_model_attrs.items(): setattr(self.generation_config, prefix + attr, value) PreTrainedModel.save_pretrained(self, *args, **kwargs) def generate(self, *args, **kwargs): r""" This method forwards all its arguments to GenerationMixin's [`~GenerationMixin.generate`]. Please refer to the docstring of this method for more information. 
""" max_new_tokens = kwargs.pop("max_new_tokens", None) input_values = kwargs.get("input_values") # TODO: @eustlb, we should have per-batch-idx values # here we do not use padding_mask to be aligned to what's done in the original codebase max_audio_frames = input_values.shape[-1] // self.config.codec_config.frame_size if max_new_tokens is None or max_new_tokens > max_audio_frames: if max_new_tokens is not None: logger.warning( f"`max_new_tokens` ({max_new_tokens}) is greater than the maximum number of audio frames ({max_audio_frames})." f"Setting `max_new_tokens` to {max_audio_frames}." ) max_new_tokens = max_audio_frames return GenerationMixin.generate( *args, max_new_tokens=max_new_tokens, **kwargs, ) __all__ = [ "KyutaiSpeechToTextPreTrainedModel", "KyutaiSpeechToTextModel", "KyutaiSpeechToTextForConditionalGeneration", "KyutaiSpeechToTextFeatureExtractor", ]
transformers/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py", "repo_id": "transformers", "token_count": 10049 }
522
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus and _encode_plus, in which the Rust tokenizer is used. """ import json from typing import Optional, Union from tokenizers import normalizers from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PaddingStrategy, PreTokenizedInput, TensorType, TextInput, TextInputPair, TruncationStrategy, ) from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import add_end_docstrings, logging from .tokenization_layoutlmv2 import ( LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer, ) logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): File containing the vocabulary. do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. 
Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original LayoutLMv2). """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = LayoutLMv2Tokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, tokenize_chinese_chars=True, strip_accents=None, **kwargs, ): super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, ) pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( pre_tok_state.get("lowercase", do_lower_case) != do_lower_case or pre_tok_state.get("strip_accents", strip_accents) != strip_accents ): pre_tok_class = getattr(normalizers, pre_tok_state.pop("type")) pre_tok_state["lowercase"] = do_lower_case pre_tok_state["strip_accents"] = strip_accents self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state) self.do_lower_case = do_lower_case # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. 
Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]: batched_input = [(text, pair)] if pair 
else [text] encodings = self._tokenizer.encode_batch( batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs ) return encodings[0].tokens @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a 
list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in 
range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: 
PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. 
if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(padding_side)) return encoded_inputs def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1: output += token_ids_1 + [self.sep_token_id] return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) __all__ = ["LayoutLMv2TokenizerFast"]
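# Illustrative usage sketch for the tokenizer above (added for clarity, not part of the original
# module). It assumes the public "microsoft/layoutlmv2-base-uncased" checkpoint is reachable and
# shows how words, bounding boxes (normalized to a 0-1000 scale) and word-level labels flow
# through `__call__` and come back as `input_ids`, `bbox` and `labels`.
from transformers import LayoutLMv2TokenizerFast

tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
words = ["hello", "world"]
boxes = [[48, 84, 156, 108], [160, 84, 250, 108]]  # one 0-1000 normalized box per word
encoding = tokenizer(words, boxes=boxes, word_labels=[0, 1], return_tensors="pt")
print(encoding["input_ids"].shape, encoding["bbox"].shape, encoding["labels"].shape)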
transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py", "repo_id": "transformers", "token_count": 17206 }
523
# coding=utf-8
# Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LED model configuration"""

from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class LEDConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LEDModel`]. It is used to instantiate an LED
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the LED
    [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the LED model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LEDModel`] or [`TFLEDModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier.
        max_encoder_position_embeddings (`int`, *optional*, defaults to 16384):
            The maximum sequence length that the encoder might ever be used with.
        max_decoder_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that the decoder might ever be used with.
init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) Example: ```python >>> from transformers import LEDModel, LEDConfig >>> # Initializing a LED allenai/led-base-16384 style configuration >>> configuration = LEDConfig() >>> # Initializing a model from the allenai/led-base-16384 style configuration >>> model = LEDModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "led" attribute_map = { "num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model", "attention_probs_dropout_prob": "attention_dropout", "initializer_range": "init_std", } def __init__( self, vocab_size=50265, max_encoder_position_embeddings=16384, max_decoder_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, classifier_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, attention_window: Union[list[int], int] = 512, **kwargs, ): self.vocab_size = vocab_size self.max_encoder_position_embeddings = max_encoder_position_embeddings self.max_decoder_position_embeddings = max_decoder_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.attention_window = attention_window super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) __all__ = ["LEDConfig"]
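# Illustrative sketch for the configuration above (added for clarity, not part of the original
# module). `attention_window` may be a single int shared by every encoder layer or a per-layer
# list, and the encoder/decoder maximum position embeddings are configured independently.
from transformers import LEDConfig

config = LEDConfig(attention_window=[256] * 12, max_encoder_position_embeddings=8192)
print(config.attention_window)                 # [256, 256, ..., 256] -- one local window per encoder layer
print(config.max_encoder_position_embeddings)  # 8192
print(config.max_decoder_position_embeddings)  # 1024 (the decoder default)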
transformers/src/transformers/models/led/configuration_led.py/0
{ "file_path": "transformers/src/transformers/models/led/configuration_led.py", "repo_id": "transformers", "token_count": 2892 }
524
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for LLaVa-NeXT.""" from collections.abc import Iterable from typing import Optional, Union import numpy as np from ...image_processing_utils import ( BaseImageProcessor, BatchFeature, get_patch_output_size, get_size_dict, select_best_resolution, ) from ...image_transforms import ( PaddingMode, convert_to_rgb, get_resize_output_image_size, pad, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): from PIL import Image def divide_to_patches(image: np.array, patch_size: int, input_data_format) -> list[np.array]: """ Divides an image into patches of a specified size. Args: image (`np.array`): The input image. patch_size (`int`): The size of each patch. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: list: A list of np.array representing the patches. """ patches = [] height, width = get_image_size(image, channel_dim=input_data_format) for i in range(0, height, patch_size): for j in range(0, width, patch_size): if input_data_format == ChannelDimension.LAST: patch = image[i : i + patch_size, j : j + patch_size] else: patch = image[:, i : i + patch_size, j : j + patch_size] patches.append(patch) return patches def expand_to_square(image: np.array, background_color, input_data_format) -> np.array: """ Expands an image to a square by adding a background color. """ height, width = get_image_size(image, channel_dim=input_data_format) if width == height: return image elif width > height: result = np.ones((width, width, image.shape[2]), dtype=image.dtype) * background_color result[(width - height) // 2 : (width - height) // 2 + height, :] = image return result else: result = np.ones((height, height, image.shape[2]), dtype=image.dtype) * background_color result[:, (height - width) // 2 : (height - width) // 2 + width] = image return result class LlavaNextImageProcessor(BaseImageProcessor): r""" Constructs a LLaVa-NeXT image processor. Based on [`CLIPImageProcessor`] with incorporation of additional techniques for processing high resolution images as explained in the [LLaVa paper](https://huggingface.co/papers/2310.03744). Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. 
The shortest edge of the image is resized to size["shortest_edge"], with
            the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
            method.
        image_grid_pinpoints (`List`, *optional*, defaults to `[[672, 336], [336, 672], [672, 672], [336, 1008], [1008, 336]]`):
            A list of possible resolutions to use for processing high resolution images. The best resolution is
            selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the
            `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in
            the `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to 224):
            Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the
            largest number of patches in the batch. Padding will be applied to the bottom and right with zeros.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
""" model_input_names = ["pixel_values", "image_sizes"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, image_grid_pinpoints: Optional[list] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_pad: Optional[bool] = True, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) image_grid_pinpoints = ( image_grid_pinpoints if image_grid_pinpoints is not None else [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]] ) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size") self.do_resize = do_resize self.size = size self.image_grid_pinpoints = image_grid_pinpoints self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_pad = do_pad self.do_convert_rgb = do_convert_rgb # Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize with CLIP->LLaVa def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" default_to_square = True if "shortest_edge" in size: size = size["shortest_edge"] default_to_square = False elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, default_to_square=default_to_square, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def pad( self, image: np.ndarray, padding: Union[int, tuple[int, int], Iterable[tuple[int, int]]], mode: PaddingMode = PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]] = 0.0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`) dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected as input. Args: image (`np.ndarray`): The image to pad. padding (`int` or `tuple[int, int]` or `Iterable[tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. 
""" # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim if isinstance(padding, int) or len(padding) != 4: return pad(image, padding, mode, constant_values, data_format, input_data_format) if input_data_format is None: input_data_format = infer_channel_dimension_format(image) if mode == PaddingMode.CONSTANT: image = np.pad(image, padding, mode="constant", constant_values=constant_values) elif mode == PaddingMode.REFLECT: image = np.pad(image, padding, mode="reflect") elif mode == PaddingMode.REPLICATE: image = np.pad(image, padding, mode="edge") elif mode == PaddingMode.SYMMETRIC: image = np.pad(image, padding, mode="symmetric") else: raise ValueError(f"Invalid padding mode: {mode}") image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image def _preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Image.Image: """ Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ images = make_list_of_images(images) all_images = [] for image in images: if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) all_images.append(image) images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images ] return images def _resize_for_patching( self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension ) -> np.array: """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image (np.array): The input image. target_resolution (tuple): The target resolution (height, width) of the image. resample (`PILImageResampling`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: np.array: The resized and padded image. """ new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format) # Resize the image resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format) return resized_image def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): original_height, original_width = original_resolution target_height, target_width = target_resolution paste_x, r_x = divmod(target_width - original_width, 2) paste_y, r_y = divmod(target_height - original_height, 2) return (paste_y, paste_y + r_y), (paste_x, paste_x + r_x) def _pad_for_patching( self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension ) -> np.array: """ Pad an image to a target resolution while maintaining aspect ratio. """ new_resolution = get_patch_output_size(image, target_resolution, input_data_format) padding = self._get_padding_size(new_resolution, target_resolution) padded_image = self.pad(image, padding=padding) return padded_image def get_image_patches( self, image: np.array, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension, ) -> list[np.array]: """ Process an image with variable resolutions by dividing it into patches. Args: image (np.array): The input image to be processed. grid_pinpoints (List): A string representation of a list of possible resolutions. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. resample (`PILImageResampling`): Resampling filter to use if resizing the image. 
data_format (`ChannelDimension` or `str`): The channel dimension format for the output image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: list[np.array]: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError("grid_pinpoints must be a list of possible resolutions.") possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=input_data_format) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching( image, best_resolution, resample=resample, input_data_format=input_data_format ) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format) patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format) # make sure that all patches are in the input data format patches = [ to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format) for patch in patches ] resized_original_image = resize( image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format, ) image_patches = [resized_original_image] + patches return image_patches def _pad_for_batching( self, pixel_values: list[np.ndarray], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`) data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. 
""" max_patch = max(len(x) for x in pixel_values) pixel_values = [ self.pad( image, padding=((0, max_patch - image.shape[0]), (0, 0), (0, 0), (0, 0)), data_format=data_format, input_data_format=input_data_format, ) for image in pixel_values ] return pixel_values def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, image_grid_pinpoints: Optional[list] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_pad: Optional[bool] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. image_grid_pinpoints (`List` *optional*, defaults to `self.image_grid_pinpoints`): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. 
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, param_name="size", default_to_square=False) image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else self.image_grid_pinpoints resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_pad = do_pad if do_pad is not None else self.do_pad do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) new_images = [] image_sizes = [get_image_size(image, channel_dim=input_data_format) for image in images] for image in images: # convert image into a list of patches # we intentionally use the same data format as the input data format image_patches = self.get_image_patches( image, image_grid_pinpoints, size=(size["shortest_edge"], size["shortest_edge"]) if "shortest_edge" in size else (min(size["height"], size["width"]), min(size["height"], size["width"])), patch_size=crop_size["height"], resample=resample, data_format=input_data_format, input_data_format=input_data_format, ) # preprocess patches pixel_values = self._preprocess( image_patches, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) pixel_values = np.array(pixel_values) new_images.append(pixel_values) if do_pad: processed_images = self._pad_for_batching(new_images) return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes}, tensor_type=return_tensors ) __all__ = ["LlavaNextImageProcessor"]
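# Illustrative usage sketch for the processor above (added for clarity, not part of the original
# module), instantiated with the class defaults rather than a pretrained checkpoint. Each image is
# expanded into several resolution patches plus a resized copy of the original, so `pixel_values`
# gains a patch dimension and `image_sizes` records the original (height, width).
import numpy as np
from PIL import Image
from transformers import LlavaNextImageProcessor

processor = LlavaNextImageProcessor()  # defaults defined in the class above
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
outputs = processor(images=image, return_tensors="pt")
print(outputs["pixel_values"].shape)  # (batch, num_patches, 3, crop_height, crop_width)
print(outputs["image_sizes"])         # original (height, width) of each input image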
transformers/src/transformers/models/llava_next/image_processing_llava_next.py/0
{ "file_path": "transformers/src/transformers/models/llava_next/image_processing_llava_next.py", "repo_id": "transformers", "token_count": 15253 }
525
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/llava_onevision/modular_llava_onevision.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_llava_onevision.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch from ...image_processing_utils import BatchFeature, get_patch_output_size, select_best_resolution from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, divide_to_patches, group_images_by_shape, reorder_images, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, get_image_size, ) from ...processing_utils import Unpack from ...utils import TensorType, auto_docstring, is_torchvision_v2_available if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F class LlavaOnevisionFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): """ image_grid_pinpoints (`list[list[int]]`, *optional*): A list of possible resolutions to use for processing high resolution images. The best resolution is selected based on the original size of the image. Can be overridden by `image_grid_pinpoints` in the `preprocess` method. do_pad (`bool`, *optional*): Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest number of patches in the batch. Padding will be applied to the bottom and right with zeros. 
""" image_grid_pinpoints: Optional[list[list[int]]] do_pad: Optional[bool] @auto_docstring class LlavaOnevisionImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 384, "width": 384} default_to_square = False crop_size = None do_resize = True do_center_crop = None do_rescale = True do_normalize = True do_convert_rgb = True do_pad = True image_grid_pinpoints = [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]] # fmt: skip valid_kwargs = LlavaOnevisionFastImageProcessorKwargs model_input_names = ["pixel_values", "image_sizes", "batch_num_images"] def __init__(self, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaOnevisionFastImageProcessorKwargs]) -> BatchFeature: if isinstance(images, (tuple, list)) and isinstance(images[0], (tuple, list)): # if the first element is a list, we assume that all elements are lists batch_num_images = [len(x) for x in images] elif isinstance(images, (tuple, list)): # treat this as a single-image case for backward compatibility batch_num_images = [1] * len(images) else: batch_num_images = [1] kwargs["batch_num_images"] = batch_num_images return super().preprocess(images, **kwargs) def _resize_for_patching( self, image: "torch.Tensor", target_resolution: tuple, interpolation: "F.InterpolationMode", input_data_format: ChannelDimension, ) -> "torch.Tensor": """ Resizes an image to a target resolution while maintaining aspect ratio. Args: image ("torch.Tensor"): The input image. target_resolution (tuple): The target resolution (height, width) of the image. interpolation (`InterpolationMode`): Resampling filter to use if resizing the image. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: "torch.Tensor": The resized and padded image. """ new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format) # Resize the image resized_image = self.resize( image=image, size=SizeDict(height=new_height, width=new_width), interpolation=interpolation, ) return resized_image def _get_padding_size(self, original_resolution: tuple, target_resolution: tuple): original_height, original_width = original_resolution target_height, target_width = target_resolution paste_x, r_x = divmod(target_width - original_width, 2) paste_y, r_y = divmod(target_height - original_height, 2) return [paste_x, paste_y, paste_x + r_x, paste_y + r_y] def _pad_for_patching( self, image: "torch.Tensor", target_resolution: tuple, input_data_format: ChannelDimension ) -> "torch.Tensor": """ Pad an image to a target resolution while maintaining aspect ratio. 
""" new_resolution = get_patch_output_size(image, target_resolution, input_data_format) padding = self._get_padding_size(new_resolution, target_resolution) padded_image = F.pad(image, padding=padding) return padded_image def _get_image_patches( self, image: "torch.Tensor", grid_pinpoints, size: tuple, patch_size: int, interpolation: "F.InterpolationMode", ) -> list["torch.Tensor"]: """ Process an image with variable resolutions by dividing it into patches. Args: image ("torch.Tensor"): The input image to be processed. grid_pinpoints (List): A string representation of a list of possible resolutions. size (`tuple`): Size to resize the original image to. patch_size (`int`): Size of the patches to divide the image into. interpolation (`"InterpolationMode"`): Resampling filter to use if resizing the image. Returns: list["torch.Tensor"]: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): raise TypeError("grid_pinpoints must be a list of possible resolutions.") possible_resolutions = grid_pinpoints image_size = get_image_size(image, channel_dim=ChannelDimension.FIRST) best_resolution = select_best_resolution(image_size, possible_resolutions) resized_image = self._resize_for_patching( image, best_resolution, interpolation=interpolation, input_data_format=ChannelDimension.FIRST ) padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=ChannelDimension.FIRST) patches = divide_to_patches(padded_image, patch_size=patch_size) resized_original_image = F.resize(image, size=size, interpolation=interpolation) image_patches = [resized_original_image] + patches return image_patches def _pad_for_batching( self, pixel_values: list["torch.Tensor"], ) -> list["torch.Tensor"]: """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. Args: pixel_values (`list[torch.Tensor]`): An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`) Returns: list[`torch.Tensor`]: The padded images. 
""" max_patch = max(len(x) for x in pixel_values) pixel_values = [ torch.nn.functional.pad(image, pad=[0, 0, 0, 0, 0, 0, 0, max_patch - image.shape[0]]) for image in pixel_values ] return pixel_values def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, image_grid_pinpoints: list[list[int]], interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], do_pad: bool, batch_num_images: list[int], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: processed_images = [] image_sizes = [] # only single image patching is supported need_patching = [n == 1 for n in batch_num_images for _ in range(n)] # Determine the size tuple if size and size.height and size.width: size_tuple = (size.height, size.width) else: size_tuple = (size.shortest_edge, size.shortest_edge) # Determine the patch size if crop_size and crop_size.height: patch_size = crop_size.height elif size and size.height: patch_size = size.height else: patch_size = size.shortest_edge for i, image in enumerate(images): if need_patching[i]: image_patches = self._get_image_patches( image, image_grid_pinpoints, size=size_tuple, patch_size=patch_size, interpolation=interpolation, ) else: padded_image = self.pad_to_square( images=image, background_color=tuple(int(x * 255) for x in self.image_mean) ) image_patches = [padded_image] # Group images by size for batched processing processed_image_patches_grouped = {} grouped_image_patches, grouped_image_patches_index = group_images_by_shape( image_patches, disable_grouping=disable_grouping ) for shape, stacked_image_patches in grouped_image_patches.items(): if do_resize: stacked_image_patches = self.resize( image=stacked_image_patches, size=size, interpolation=interpolation, ) if do_center_crop: stacked_image_patches = self.center_crop(stacked_image_patches, crop_size) # Fused rescale and normalize stacked_image_patches = self.rescale_and_normalize( stacked_image_patches, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_image_patches_grouped[shape] = stacked_image_patches processed_image_patches = reorder_images(processed_image_patches_grouped, grouped_image_patches_index) processed_image_patches = ( torch.stack(processed_image_patches, dim=0) if return_tensors else processed_image_patches ) processed_images.append(processed_image_patches) image_sizes.append(get_image_size(image, ChannelDimension.FIRST)) if do_pad: processed_images = self._pad_for_batching(processed_images) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature( data={"pixel_values": processed_images, "image_sizes": image_sizes, "batch_num_images": batch_num_images}, tensor_type=return_tensors, ) # Copied from transformers.models.llava.image_processing_llava_fast.LlavaImageProcessorFast.pad_to_square def pad_to_square( self, images: "torch.Tensor", background_color: Union[int, tuple[int, int, int]] = 0, ) -> "torch.Tensor": """ Pads an image to a square based on the longest edge. Args: images (`np.ndarray`): The images to pad. background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0): The color to use for the padding. Can be an integer for single channel or a tuple of integers representing for multi-channel images. 
If passed as an integer in multi-channel mode, it will default to `0` in subsequent channels.

        Returns:
            `torch.Tensor`: The padded images.
        """
        height, width = get_image_size(images, ChannelDimension.FIRST)
        if height == width:
            return images

        num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]
        if isinstance(background_color, int):
            background_color = [background_color] + [0] * (num_channels - 1)
        elif len(background_color) != num_channels:
            raise ValueError(
                f"background_color must have exactly {num_channels} elements, one per image channel"
            )

        max_dim = max(height, width)
        paste_x_left = (max_dim - width) // 2
        paste_y_left = (max_dim - height) // 2
        paste_x_right = max_dim - width - paste_x_left
        paste_y_right = max_dim - height - paste_y_left

        padded_images = F.pad(
            images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color
        )

        return padded_images


__all__ = ["LlavaOnevisionImageProcessorFast"]
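# Illustrative usage sketch for the fast processor above (added for clarity, not part of the
# original module); it requires torch and torchvision and uses the class defaults. A single image
# goes through the anyres patching path, and the output carries `batch_num_images` alongside
# `pixel_values` and `image_sizes`.
import numpy as np
from PIL import Image
from transformers import LlavaOnevisionImageProcessorFast

processor = LlavaOnevisionImageProcessorFast()  # defaults defined in the class above
image = Image.fromarray(np.random.randint(0, 256, (500, 700, 3), dtype=np.uint8))
outputs = processor(images=image, return_tensors="pt")
print(outputs["pixel_values"].shape)  # e.g. (batch, num_patches, 3, 384, 384) with the default size
print(outputs["image_sizes"])         # original (height, width) per image
print(outputs["batch_num_images"])    # [1] for a single image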
transformers/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py/0
{ "file_path": "transformers/src/transformers/models/llava_onevision/image_processing_llava_onevision_fast.py", "repo_id": "transformers", "token_count": 6639 }
526
# coding=utf-8 # Copyright 2022 Google LLC., LongT5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LongT5 model.""" import copy import math import warnings from typing import Any, Optional, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, auto_docstring, is_torch_flex_attn_available, is_torch_fx_proxy, is_torchdynamo_compiling, logging, ) from ...utils.deprecation import deprecate_kwarg from .configuration_longt5 import LongT5Config if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) # TODO: Update before the merge def _pad_to_multiple(x: torch.Tensor, block_len: int, dim: int, pad_value: int = 0) -> torch.Tensor: """Pad a tensor so that a sequence length will be a multiple of `block_len`""" pad_len = -x.shape[dim] % block_len # Handle cases when an empty input sequence is given if not all(x.shape): new_shape = list(x.shape) new_shape[dim] += pad_len return torch.zeros(new_shape, dtype=x.dtype) pad = [(0, 0)] * x.ndim pad[dim] = (0, pad_len) pad = sum(pad[::-1], ()) x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) return x def _split_into_blocks(x: torch.Tensor, block_len: int, dim: int) -> torch.Tensor: """Split an input tensor into blocks of a given `block_len` along the given `dim`. If the dimension length is not a multiple of `block_len`, it will be padded first with selected `pad_value`. """ # pad tensor to multiple of block_len if x.shape[dim] % block_len != 0: x = _pad_to_multiple(x, block_len, dim, pad_value=0) num_blocks = x.shape[dim] // block_len output_shape = x.shape[:dim] + (num_blocks, block_len) + x.shape[(dim + 1) :] # If 0 is in output_shape, we cannot apply reshape because of incompatibility with ONNX conversion if 0 in output_shape: return torch.empty(output_shape, dtype=x.dtype, device=x.device) return x.reshape(output_shape) def _concatenate_3_blocks(x: torch.Tensor, block_dim: int, sequence_dim: int, pad_value: int = 0) -> torch.Tensor: """Concatenate three consecutive blocks for each input block for local attentiont. For more information, see: https://huggingface.co/papers/2112.07916. 
""" num_blocks = x.shape[block_dim] pad = [(0, 0)] * x.ndim pad[block_dim] = (1, 1) pad = sum(pad[::-1], ()) # [batch_size, num_blocks, block_len] -> [batch_size, num_blocks + 2, block_len] x = nn.functional.pad(x, pad=pad, mode="constant", value=pad_value) blocks_list: list[torch.Tensor] = [] for i in range(3): # We use indexing approach here: # https://numpy.org/doc/stable/user/basics.indexing.html#dealing-with-variable-numbers-of-indices-within-programs indices = [slice(0, None)] * x.ndim indices[block_dim] = slice(i, i + num_blocks) indices = tuple(indices) blocks_list.append(x[indices]) # [batch_size, num_blocks, 3 * block_len, ...] return torch.cat(blocks_list, dim=sequence_dim) def _make_3block_relative_position_ids(block_len: int) -> torch.Tensor: """Makes 3-blocked relative position ids for local attention.""" position_ids = torch.arange(3 * block_len, dtype=torch.int32) center_position_ids = position_ids[block_len:-block_len] # [block_len, 3 * block_len] relative_position_ids = position_ids.unsqueeze(0) - center_position_ids.unsqueeze(1) return relative_position_ids def _mask_local_attention_mask(local_attention_mask: torch.Tensor, block_len: int) -> torch.Tensor: """Mask local attention mask to enforce that tokens are not allowed to attend tokens farther than ``local_radius.""" relative_position_ids = _make_3block_relative_position_ids(block_len) locality_mask = torch.abs(relative_position_ids) < block_len locality_mask = locality_mask[None, None, :, :] locality_mask = locality_mask.to(local_attention_mask.device) return torch.logical_and(local_attention_mask, locality_mask) def _get_local_attention_mask(attention_mask: torch.Tensor, block_len: int, device: torch.device) -> torch.Tensor: """Prepare attention mask to be applied for a local attention.""" # [batch_size, num_blocks, block_len] _blocked_attention_mask = _split_into_blocks(attention_mask, block_len, dim=1) # [batch_size, num_block, 3 * block_len] _3blocked_attention_mask = _concatenate_3_blocks(_blocked_attention_mask, block_dim=1, sequence_dim=2) _blocked_attention_mask = _blocked_attention_mask.unsqueeze(-1) _3blocked_attention_mask = _3blocked_attention_mask.unsqueeze(-2) # [batch_size, num_block, block_len, 3 * block_len] local_attention_mask = torch.logical_and(_blocked_attention_mask, _3blocked_attention_mask) local_attention_mask = _mask_local_attention_mask(local_attention_mask, block_len) # [batch_size, 1, num_block, block_len, 3 * block_len] return local_attention_mask.unsqueeze(1).to(device) def _make_global_fixed_block_ids( attention_mask: torch.Tensor, global_block_size: int ) -> tuple[torch.Tensor, torch.Tensor]: """Obtain the "fixed block" global id corresponding to each input token. This implementation is a simplified version of the original Flaxformr implementation adopted from: https://github.com/google/flaxformer/blob/main/flaxformer/architectures/longt5/long_attention.py. In our scenario, as we use this strategy only for a decoder, orphan tokens, i.e. those tokens which do not make for the whole fixed block, are assigned to the preceding block. Padding tokens from the original sequence are represented by -1. 
""" batch_size, seq_len = attention_mask.shape[:2] def handle_orphan_tokens(block_ids: torch.Tensor) -> torch.Tensor: block_ends = (torch.arange(seq_len) % global_block_size) == global_block_size - 1 block_ends = block_ends.to(block_ids.device) true_block_ends = torch.logical_and(block_ends, block_ids >= 0) full_blocks = true_block_ends.sum(-1).unsqueeze(-1).type(block_ids.dtype) - 1 block_ids = torch.where(block_ids < full_blocks, block_ids, full_blocks) return block_ids fixed_block_mask = torch.ones_like(attention_mask, device=attention_mask.device) / global_block_size fixed_block_mask = torch.cumsum(fixed_block_mask, axis=1) - fixed_block_mask mask = torch.where(attention_mask != 0.0, 1.0, -1000.0).type(attention_mask.dtype) global_block_ids = torch.floor(mask + fixed_block_mask - 1.0).type(attention_mask.dtype) _global_block_ids_lower_bound = torch.tensor(-1, dtype=global_block_ids.dtype, device=global_block_ids.device) global_block_ids = torch.where( global_block_ids > _global_block_ids_lower_bound, global_block_ids, _global_block_ids_lower_bound ) # set padding tokens to -1 global_block_ids = (global_block_ids * attention_mask) + (attention_mask - 1) # [batch_size, seq_len] global_block_ids = handle_orphan_tokens(global_block_ids) num_globals = seq_len // global_block_size # [batch_size, seq_len // global_block_size] if num_globals > 0: _sequence_block_ids_max = torch.max(global_block_ids, dim=-1).values.repeat(num_globals, 1).transpose(0, 1) else: _sequence_block_ids_max = torch.zeros( batch_size, 0, dtype=global_block_ids.dtype, device=global_block_ids.device ) global_segment_ids = torch.cumsum(torch.ones(batch_size, num_globals), dim=-1) - 1 global_segment_ids = global_segment_ids.to(attention_mask.device) global_segment_ids = torch.where(global_segment_ids <= _sequence_block_ids_max, 1, 0) return global_block_ids.type(torch.int), global_segment_ids.type(torch.int) def _make_side_relative_position_ids(attention_mask: torch.Tensor, global_block_size: int) -> torch.Tensor: """Create the relative position tensor for local -> global attention.""" block_ids, global_segment_ids = _make_global_fixed_block_ids(attention_mask, global_block_size) global_seq_len = global_segment_ids.shape[-1] global_positions = torch.arange(global_seq_len, device=block_ids.device) side_relative_position = global_positions - block_ids[..., None] return side_relative_position.type(torch.int64) def _create_global_aggregates( hidden_states: torch.Tensor, block_ids: torch.Tensor, global_seq_len: int ) -> torch.Tensor: """Compute individual block aggregates by summing over individual blocks.""" # (batch..., seq_len, global_seq_len)) block_ids = block_ids.where( block_ids >= 0, torch.tensor(global_seq_len, dtype=block_ids.dtype, device=block_ids.device) ) one_hot_block_ids = nn.functional.one_hot(block_ids.type(torch.int64), global_seq_len + 1)[:, :, :-1] return torch.einsum("...nd,...ng->...gd", hidden_states, one_hot_block_ids.type(hidden_states.dtype)) # Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->LongT5 class LongT5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the LongT5 style. No bias and no subtraction of mean. 
""" super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # LongT5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://huggingface.co/papers/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states try: from apex.normalization import FusedRMSNorm LongT5LayerNorm = FusedRMSNorm # noqa logger.info("Discovered apex.normalization.FusedRMSNorm - will use it instead of LongT5LayerNorm") except ImportError: # using the normal LongT5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to LongT5LayerNorm") pass # Copied from transformers.models.t5.modeling_t5.T5DenseActDense with T5->LongT5 class LongT5DenseActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) if ( isinstance(self.wo.weight, torch.Tensor) and hidden_states.dtype != self.wo.weight.dtype and self.wo.weight.dtype != torch.int8 ): hidden_states = hidden_states.to(self.wo.weight.dtype) hidden_states = self.wo(hidden_states) return hidden_states class LongT5DenseGatedActDense(nn.Module): def __init__(self, config: LongT5Config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.act = ACT2FN[config.dense_act_fn] def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->LongT5 class LongT5LayerFF(nn.Module): def __init__(self, config: LongT5Config): super().__init__() if config.is_gated_act: self.DenseReluDense = LongT5DenseGatedActDense(config) else: self.DenseReluDense = LongT5DenseActDense(config) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states # Copied from transformers.models.t5.modeling_t5.T5Attention with T5->LongT5 class LongT5Attention(nn.Module): def __init__( self, config: LongT5Config, has_relative_attention_bias=False, layer_idx: Optional[int] = None, ): super().__init__() self.is_decoder = config.is_decoder 
self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, query_length, key_length, device=None, cache_position=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device if cache_position is None: context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None] else: context_position = cache_position[:, None].to(device) memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_values=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, cache_position=None, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder) batch_size, seq_length = hidden_states.shape[:2] # if key_value_states are provided this layer is used as a cross-attention layer for the decoder is_cross_attention = key_value_states is not None query_states = self.q(hidden_states) query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) # Check is encoder-decoder model is being used. 
Otherwise we'll get `DynamicCache` if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k(current_states) value_states = self.v(current_states) key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 scores = torch.matmul(query_states, key_states.transpose(3, 2)) if position_bias is None: key_length = key_states.shape[-2] # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past) real_seq_length = query_length if query_length is not None else cache_position[-1] + 1 if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device, cache_position=cache_position ) position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: causal_mask = mask[:, :, :, : key_states.shape[-2]] position_bias = position_bias + causal_mask if self.pruned_heads: mask = torch.ones(position_bias.shape[1]) mask[list(self.pruned_heads)] = 0 position_bias_masked = position_bias[:, mask.bool()] else: position_bias_masked = position_bias scores += position_bias_masked # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(batch_size, -1, self.inner_dim) attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5LocalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder 
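        # Local attention hyper-parameters: each token only attends to neighbours within `local_radius`, implemented
        # by splitting the sequence into blocks of `block_len = local_radius + 1` tokens (see `_split_into_blocks`).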
self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() self.gradient_checkpointing = False # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = ( self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != "meta" else None ) memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # get query/key/value states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Compute scores scores = torch.einsum( "...qhd,...khd->...hqk", query_states, key_states ) # 
(batch_size, num_block, n_heads, block_len, 3 * block_len) if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if mask is not None: # Replace masked positions with -1e10 (according to the original implementation) mask = torch.where(mask > 0, 0.0, -1e10) # We need to adjust position bias shape to be sum with mask position_bias = position_bias + mask.transpose(1, 2) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (batch_size, num_blocks, n_heads, block_len, 3 * block_len) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) outputs = ( attn_output, position_bias, ) if output_attentions: outputs = outputs + (attn_weights,) return outputs class LongT5TransientGlobalAttention(nn.Module): def __init__(self, config: LongT5Config, has_relative_attention_bias: bool = False) -> None: super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.global_block_size = config.global_block_size self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() # Relativen attention bias & Layer norm for global attention if self.has_relative_attention_bias: self.global_relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.global_input_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) # Copied from transformers.models.t5.modeling_t5.T5Attention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads 
= self.pruned_heads.union(heads) @staticmethod # Copied from transformers.models.t5.modeling_t5.T5Attention._relative_position_bucket def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_position_if_large) return relative_buckets def compute_bias(self, block_length: int): """Compute binned relative position bias""" target_device = ( self.relative_attention_bias.weight.device if self.relative_attention_bias.weight.device.type != "meta" else None ) memory_position = torch.arange(3 * block_length, dtype=torch.long, device=target_device) context_position = memory_position[block_length:-block_length] # (block_length, 3 * block_length) relative_position = memory_position[None, :] - context_position[:, None] relative_position_bucket = self._relative_position_bucket( relative_position, # (block_length, 3 * block_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (block_length, 3 * block_length, num_heads) values = self.relative_attention_bias(relative_position_bucket) # (1, 1, num_heads, block_length, 3 * block_length) values = values.permute([2, 0, 1]).unsqueeze(0).unsqueeze(0) return values def compute_side_bias(self, mask: torch.Tensor, global_segment_ids: torch.Tensor) -> torch.Tensor: # (batch_size, 1, seq_len, global_seq_len) side_attention_mask = torch.eq(mask[..., None], global_segment_ids[:, None, :])[:, None, ...] 
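        # Turn the boolean side-attention mask into an additive bias: 0.0 where a token may attend a global block,
        # -1e10 (masked out after the softmax) where it may not.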
attention_side_bias = torch.where(side_attention_mask > 0, 0.0, -1e10) # (batch_size, seq_len, global_seq_len) side_relative_position = _make_side_relative_position_ids(mask, self.global_block_size) side_relative_position_bucket = self._relative_position_bucket( side_relative_position, bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) # (batch_size, seq_len, global_seq_len, num_heads) side_bias = self.global_relative_attention_bias(side_relative_position_bucket) # (batch_size, num_heads, seq_len, global_seq_len) side_bias = side_bias.permute([0, 3, 1, 2]) # (batch_size, num_heads, seq_len, global_seq_len) attention_side_bias = attention_side_bias + side_bias return attention_side_bias def forward( self, hidden_states, mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, ): batch_size, seq_length = hidden_states.shape[:2] def shape(states): """projection""" return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim) def unshape(states): """reshape""" return states.contiguous().view(batch_size, -1, self.inner_dim) # Prepare components for transient-global attention # Obtain block_ids and global_segment_ids # global_seq_len := seq_len // self.global_block_size # shapes: (batch_size, seq_len) & (batch_size, global_seq_len) block_ids, global_segment_ids = _make_global_fixed_block_ids( mask if mask is not None else torch.ones(hidden_states.shape[:-1]), self.global_block_size, ) # Create global inputs _global_seq_len = global_segment_ids.shape[-1] global_inputs = _create_global_aggregates(hidden_states, block_ids, _global_seq_len) global_inputs = self.global_input_layer_norm(global_inputs) # get query states -> (batch_size, seq_length, n_heads, dim_per_head) query_states = shape(self.q(hidden_states)) key_states = shape(self.k(hidden_states)) value_states = shape(self.v(hidden_states)) # Get global/side key/value states shape: (batch_size, global_seq_len, n_heads, dim_per_head) side_key_states = shape(self.k(global_inputs)) side_value_states = shape(self.v(global_inputs)) # Split into blocks -> (batch_size, num_blocks, block_len, n_heads, dim_per_head) query_states = _split_into_blocks(query_states, self.block_len, dim=1) key_states = _split_into_blocks(key_states, self.block_len, dim=1) value_states = _split_into_blocks(value_states, self.block_len, dim=1) # Concatenate 3 blocks for keys and values -> (batch_size, num_blocks, 3 * block_len, n_heads, dim_per_head) key_states = _concatenate_3_blocks(key_states, block_dim=1, sequence_dim=2) value_states = _concatenate_3_blocks(value_states, block_dim=1, sequence_dim=2) # Tile side inputs across local key/value blocks # New shape: (batch_size, num_blocks, global_seq_len, n_heads, dim_per_head) reps = [1] * (side_key_states.ndim + 1) reps[1] = key_states.shape[1] side_key_states = side_key_states.unsqueeze(1).repeat(reps) side_value_states = side_value_states.unsqueeze(1).repeat(reps) # Concatenate "local" and "side"/"global" key/value states to allow each token to attend global aggregated ones # New shape: (batch_size, num_blocks, 3 * block_len + global_seq_len, n_heads, dim_per_head) key_states = torch.cat([key_states, side_key_states], dim=2) value_states = torch.cat([value_states, side_value_states], dim=2) # Compute scores -> (batch_size, num_block, n_heads, block_len, 3 * block_len + global_seq_len) scores = torch.einsum("...qhd,...khd->...hqk", query_states, key_states) if mask is not None: # We need to adjust 
position bias shape to be sum with mask local_attention_mask = _get_local_attention_mask(mask, self.block_len, hidden_states.device) # Replace masked positions with -10_000 (according to the original implementation) local_attention_mask = torch.where(local_attention_mask > 0, 0.0, -1e10) else: local_attention_mask = None if position_bias is None: # position_bias shape: # (1, 1, n_heads, block_len, 3 * block_len) if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, 1, self.n_heads, self.block_len, 3 * self.block_len), device=scores.device, dtype=scores.dtype, ) if self.gradient_checkpointing and self.training: position_bias.requires_grad = True else: position_bias = self.compute_bias(self.block_len) if local_attention_mask is not None: # (batch_size, 1, n_heads, block_len, 3 * block_len) position_bias = position_bias + local_attention_mask.transpose(1, 2) position_bias = position_bias.type(scores.dtype) # Calculate global/side bias - shape: # (batch_size, num_heads, seq_len, global_seq_len) if mask is None: mask = torch.ones(batch_size, seq_length) # (batch_size, num_heads, seq_len, global_seq_len) side_position_bias = self.compute_side_bias(mask, global_segment_ids) # (batch_size, num_blocks, num_heads, block_len, global_seq_len) side_position_bias = _split_into_blocks(side_position_bias, self.block_len, dim=-2).transpose(1, 2) side_position_bias = side_position_bias.type(scores.dtype).to(scores.device) # (batch_size, num_blocks, num_heads, block_len, 3 * block_len + global_seq_len) position_bias = torch.cat([position_bias, side_position_bias], dim=-1) scores += position_bias # (batch_size, num_blocks, n_heads, block_len, 3 * block_len + global_seq_len) attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_weights = attn_weights.type(value_states.dtype) attn_output = unshape(torch.einsum("...hqk,...khd->...qhd", attn_weights, value_states)) attn_output = attn_output[:, :seq_length, :] attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->LongT5 class LongT5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.SelfAttention = LongT5Attention( config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return 
outputs class LongT5LayerLocalSelfAttention(nn.Module): """Local self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.LocalSelfAttention = LongT5LocalAttention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_values and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.LocalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class LongT5LayerTransientGlobalSelfAttention(nn.Module): """Transient-Global self attention used in encoder""" def __init__(self, config, has_relative_attention_bias=False, layer_idx: Optional[int] = None): super().__init__() self.TransientGlobalSelfAttention = LongT5TransientGlobalAttention( config, has_relative_attention_bias=has_relative_attention_bias ) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, output_attentions=False, **kwargs: Any, # to accept past_key_values and use_cache kwargs ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.TransientGlobalSelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs # Copied from transformers.models.t5.modeling_t5.T5LayerCrossAttention with T5->LongT5 class LongT5LayerCrossAttention(nn.Module): def __init__(self, config, layer_idx: Optional[int] = None): super().__init__() self.EncDecAttention = LongT5Attention(config, has_relative_attention_bias=False, layer_idx=layer_idx) self.layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_values=None, use_cache=False, query_length=None, output_attentions=False, cache_position=None, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, cache_position=cache_position, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class LongT5Block(GradientCheckpointingLayer): def __init__(self, config, has_relative_attention_bias=False, 
layer_idx: Optional[int] = None): super().__init__() self.is_decoder = config.is_decoder if config.is_decoder: attention_layer = LongT5LayerSelfAttention elif config.encoder_attention_type == "local": attention_layer = LongT5LayerLocalSelfAttention elif config.encoder_attention_type == "transient-global": attention_layer = LongT5LayerTransientGlobalSelfAttention else: raise ValueError( "For encoder attention mechanism, either `local` or `transient-global` attention type is expected, " f"but got {config.encoder_attention_type}." ) self.layer = nn.ModuleList() self.layer.append( attention_layer(config, has_relative_attention_bias=has_relative_attention_bias, layer_idx=layer_idx) ) if self.is_decoder: self.layer.append(LongT5LayerCrossAttention(config, layer_idx=layer_idx)) self.layer.append(LongT5LayerFF(config)) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_values=None, use_cache=False, output_attentions=False, return_dict=True, cache_position=None, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, query_length=cache_position[-1] + 1, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return ( (hidden_states,) + attention_outputs ) # hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) @auto_docstring class 
LongT5PreTrainedModel(PreTrainedModel): config: LongT5Config base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["LongT5Block"] _can_compile_fullgraph = False # TODO: @raushan more involved due to local/global attn @property # Copied from transformers.models.t5.modeling_t5.T5PreTrainedModel.dummy_inputs def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, LongT5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (LongT5Model, LongT5ForConditionalGeneration, LongT5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, LongT5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, LongT5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, (LongT5Attention, LongT5LocalAttention, LongT5TransientGlobalAttention)): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) if isinstance(module, LongT5TransientGlobalAttention): module.global_relative_attention_bias.weight.data.normal_( mean=0.0, std=factor * ((d_model) ** -0.5) ) # Copied from 
transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right with T5->LongT5 def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id if decoder_start_token_id is None: raise ValueError( "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. " "See LongT5 docs for more information." ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), decoder_start_token_id) shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class LongT5Stack(LongT5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.is_decoder = config.is_decoder self.local_radius = config.local_radius self.block_len = self.local_radius + 1 self.block = nn.ModuleList( [ LongT5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers) ] ) self.final_layer_norm = LongT5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.t5.modeling_t5.T5Stack.set_input_embeddings def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape if self.is_decoder: if use_cache and past_key_values is None: if self.config.is_encoder_decoder: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) else: past_key_values = DynamicCache() elif not self.is_decoder: # do not pass cache object down the line for encoder stack # it messes indexing later in decoder-stack because cache object is modified in-place past_key_values = None past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if self.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values, output_attentions, ) # We use local attention in encoder self-attention, otherwise standard self & cross attentions are used elif self.config.encoder_attention_type == "local": causal_mask = _get_local_attention_mask(attention_mask, self.block_len, inputs_embeds.device) else: # we need to use both local attention mask and standard extended mask for transient-global attention causal_mask = attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, layer_module in enumerate(self.block): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, # as a positional argument for gradient checkpointing layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position, ) # layer_outputs is a tuple with: # 
hidden-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
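        # The helper below turns a 2D `(batch_size, key_value_length)` padding mask into a 4D
        # `(batch_size, 1, query_length, key_value_length)` additive mask in which positions
        # that must not be attended to are filled with `torch.finfo(dtype).min`.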
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. 
""" @auto_docstring class LongT5Model(LongT5PreTrainedModel): _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. 
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. Example: ```python >>> from transformers import AutoTokenizer, LongT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5Model.from_pretrained("google/long-t5-local-base") >>> # Let's try a very long encoder input. >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you", return_tensors="pt" ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, 
decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" LONGT5 Model with a `language modeling` head on top. """ ) class LongT5ForConditionalGeneration(LongT5PreTrainedModel, GenerationMixin): _keys_to_ignore_on_load_unexpected = [ r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight", ] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: LongT5Config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = LongT5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) LONGT5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [LONGT5 Training](./longt5#training). decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. decoder_head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` Examples: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> model = LongT5ForConditionalGeneration.from_pretrained( ... "Stancld/longt5-tglobal-large-16384-pubmed-3k_steps" ... ) >>> # Let's try a very long input. 
>>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) @auto_docstring class 
LongT5EncoderModel(LongT5PreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight"] _keys_to_ignore_on_load_unexpected = [r"decoder"] def __init__(self, config: LongT5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = LongT5Stack(encoder_config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.FloatTensor], BaseModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. LongT5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [LONGT5 Training](./longt5#training). Example: ```python >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base") >>> model = LongT5EncoderModel.from_pretrained("google/long-t5-local-base") >>> input_ids = tokenizer( ... 100 * "Studies have been shown that owning a dog is good for you ", return_tensors="pt" ... ).input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs __all__ = ["LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel"]
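# ---------------------------------------------------------------------------------------
# Illustrative sketch (not part of the library): how labels are turned into decoder inputs
# by `LongT5PreTrainedModel._shift_right` above. Tokens are shifted one position to the
# right, `decoder_start_token_id` (usually the pad token for LongT5) is prepended, and any
# `-100` ignore indices in the labels are replaced by `pad_token_id`. The concrete token
# ids below are made-up values used purely for illustration.
if __name__ == "__main__":
    import torch

    decoder_start_token_id = 0  # assumption: LongT5 checkpoints use pad_token_id (0) here
    pad_token_id = 0

    labels = torch.tensor([[37, 82, 15, 1, -100, -100]])  # -100 marks positions ignored by the loss

    shifted_input_ids = labels.new_zeros(labels.shape)
    shifted_input_ids[..., 1:] = labels[..., :-1].clone()
    shifted_input_ids[..., 0] = decoder_start_token_id
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    print(shifted_input_ids)  # tensor([[ 0, 37, 82, 15,  1,  0]])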
# Source: transformers/src/transformers/models/longt5/modeling_longt5.py
{ "file_path": "transformers/src/transformers/models/longt5/modeling_longt5.py", "repo_id": "transformers", "token_count": 44878 }
527
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/metaclip_2/modular_metaclip_2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_metaclip_2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 from dataclasses import dataclass from typing import Any, Callable, Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int from ...utils.generic import check_model_inputs from .configuration_metaclip_2 import MetaClip2Config, MetaClip2TextConfig, MetaClip2VisionConfig logger = logging.get_logger(__name__) class MetaClip2TextEmbeddings(nn.Module): def __init__(self, config: MetaClip2TextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class MetaClip2VisionEmbeddings(nn.Module): def __init__(self, config: MetaClip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the 
pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})." 
) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, output_attentions: bool = True, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() if not output_attentions: attn_weights = None return attn_output, attn_weights class MetaClip2Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: Union[MetaClip2VisionConfig, MetaClip2TextConfig]): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" batch_size, seq_length, embed_dim = hidden_states.shape queries = self.q_proj(hidden_states) keys = self.k_proj(hidden_states) values = self.v_proj(hidden_states) queries = queries.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2) keys = keys.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2) values = values.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2) # METACLIP_2 text model uses both `causal_attention_mask` and `attention_mask` # in case FA2 kernel is called, `is_causal` should be inferred from `causal_attention_mask` if self.config._attn_implementation == "flash_attention_2": self.is_causal = causal_attention_mask is not None else: if attention_mask is not None and causal_attention_mask is not None: attention_mask = attention_mask + causal_attention_mask elif causal_attention_mask is not None: attention_mask = causal_attention_mask attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, queries, keys, values, attention_mask, is_causal=self.is_causal, scaling=self.scale, dropout=0.0 if not self.training else self.dropout, output_attentions=output_attentions, ) attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class MetaClip2MLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states @auto_docstring class MetaClip2PreTrainedModel(PreTrainedModel): config: MetaClip2Config base_model_prefix = "metaclip_2" supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, MetaClip2TextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, MetaClip2VisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, MetaClip2Attention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, MetaClip2MLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, MetaClip2Model): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) elif isinstance(module, MetaClip2VisionModelWithProjection): nn.init.normal_( module.visual_projection.weight, std=self.config.hidden_size**-0.5 * self.config.initializer_factor, ) elif isinstance(module, MetaClip2TextModelWithProjection): nn.init.normal_( module.text_projection.weight, std=self.config.hidden_size**-0.5 * self.config.initializer_factor, ) elif isinstance(module, MetaClip2ForImageClassification): nn.init.normal_( module.classifier.weight, std=self.config.vision_config.hidden_size**-0.5 * 
self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class MetaClip2EncoderLayer(GradientCheckpointingLayer): def __init__(self, config: Union[MetaClip2VisionConfig, MetaClip2TextConfig]): super().__init__() self.embed_dim = config.hidden_size self.self_attn = MetaClip2Attention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = MetaClip2MLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class MetaClip2Encoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`MetaClip2EncoderLayer`]. Args: config: MetaClip2Config """ def __init__(self, config: MetaClip2Config): super().__init__() self.config = config self.layers = nn.ModuleList([MetaClip2EncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> BaseModelOutput: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, ) class MetaClip2TextTransformer(nn.Module): def __init__(self, config: MetaClip2TextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = MetaClip2TextEmbeddings(config) self.encoder = MetaClip2Encoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # For `pooled_output` computation self.eos_token_id = config.eos_token_id # For attention mask, it differs between `flash_attention_2` and other attention implementations self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" @check_model_inputs @auto_docstring def forward( self, input_ids, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPooling: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # CLIP's text model uses causal mask, prepare it here. 
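        # i.e. an additive lower-triangular mask of shape (batch_size, 1, seq_len, seq_len)
        # whose future positions are set to the minimum value of `hidden_states.dtype`.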
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None and not self._use_flash_attention_2: # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, **kwargs, ) last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.final_layer_norm(last_hidden_state) # Use robust pooling like CLIP - finds the first EOS token position per sequence pooled_output = last_hidden_state[ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id).int().argmax(dim=-1), ] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The text model from METACLIP_2 without any head or projection on top. """ ) class MetaClip2TextModel(MetaClip2PreTrainedModel): config: MetaClip2TextConfig _no_split_modules = ["MetaClip2TextEmbeddings", "MetaClip2EncoderLayer"] _supports_flash_attn = False # mask creation only accounts for sdpa/eager def __init__(self, config: MetaClip2TextConfig): super().__init__(config) self.text_model = MetaClip2TextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> BaseModelOutputWithPooling: r""" Examples: ```python >>> from transformers import AutoTokenizer, MetaClip2TextModel >>> model = MetaClip2TextModel.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) @dataclass @auto_docstring( custom_intro=""" Base class for text model's outputs that also contains a pooling of the last hidden states. """ ) class MetaClip2TextModelOutput(ModelOutput): r""" text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The text embeddings obtained by applying the projection layer to the pooler_output. 
""" text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class MetaClip2TextModelWithProjection(MetaClip2PreTrainedModel): config: MetaClip2TextConfig _no_split_modules = ["MetaClip2TextEmbeddings", "MetaClip2EncoderLayer"] def __init__(self, config: MetaClip2TextConfig): super().__init__(config) text_model = MetaClip2TextModel._from_config(config) self.text_model = text_model.text_model self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> MetaClip2TextModelOutput: r""" Examples: ```python >>> from transformers import AutoTokenizer, MetaClip2TextModelWithProjection >>> model = MetaClip2TextModelWithProjection.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> text_embeds = outputs.text_embeds ```""" text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) pooled_output = text_outputs.pooler_output text_embeds = self.text_projection(pooled_output) return MetaClip2TextModelOutput( text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions, ) @dataclass @auto_docstring class MetaClip2Output(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2VisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`MetaClip2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`MetaClip2VisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class MetaClip2VisionTransformer(nn.Module): def __init__(self, config: MetaClip2VisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = MetaClip2VisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = MetaClip2Encoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, ) -> BaseModelOutputWithPooling: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) last_hidden_state = encoder_outputs.last_hidden_state pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) # contrastive loss function, adapted from # https://sachinruk.github.io/blog/2021-03-07-metaclip_2.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def metaclip_2_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor: """ This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566 """ square_tensor = torch.pow(tensor, 2) sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True) normed_tensor = torch.pow(sum_tensor, 0.5) return normed_tensor @auto_docstring class MetaClip2Model(MetaClip2PreTrainedModel): config: MetaClip2Config _no_split_modules = ["MetaClip2TextEmbeddings", "MetaClip2EncoderLayer", "MetaClip2VisionEmbeddings"] _supports_flash_attn = False # mask creation only accounts for sdpa/eager def __init__(self, config: MetaClip2Config): super().__init__(config) if not isinstance(config.text_config, MetaClip2TextConfig): raise TypeError( "config.text_config is expected to be of type MetaClip2TextConfig but is of type" f" {type(config.text_config)}." 
) if not isinstance(config.vision_config, MetaClip2VisionConfig): raise TypeError( "config.vision_config is expected to be of type MetaClip2VisionConfig but is of type" f" {type(config.vision_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_model = MetaClip2TextModel._from_config(text_config) self.text_model = text_model.text_model vision_model = MetaClip2VisionModel._from_config(vision_config) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @auto_docstring def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2TextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use METACLIP_2 model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) pooled_output = text_outputs.pooler_output text_features = self.text_projection(pooled_output) return text_features @auto_docstring def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2VisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use METACLIP_2 model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs.pooler_output image_features = self.visual_projection(pooled_output) return image_features @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> MetaClip2Output: r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use METACLIP_2 model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) text_outputs: BaseModelOutputWithPooling = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) image_embeds = vision_outputs.pooler_output image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / _get_vector_norm(image_embeds) text_embeds = text_embeds / _get_vector_norm(text_embeds) # cosine similarity as logits logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device)) logits_per_text = logits_per_text * self.logit_scale.exp().to(text_embeds.device) logits_per_image = logits_per_text.t() loss = None if return_loss: loss = metaclip_2_loss(logits_per_text) return MetaClip2Output( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @auto_docstring( custom_intro=""" The vision model from METACLIP_2 without any head or projection on top. """ ) class MetaClip2VisionModel(MetaClip2PreTrainedModel): config: MetaClip2VisionConfig main_input_name = "pixel_values" _no_split_modules = ["MetaClip2EncoderLayer"] def __init__(self, config: MetaClip2VisionConfig): super().__init__(config) self.vision_model = MetaClip2VisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> BaseModelOutputWithPooling: r""" Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2VisionModel >>> model = MetaClip2VisionModel.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) @dataclass @auto_docstring( custom_intro=""" Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. 
""" ) class MetaClip2VisionModelOutput(ModelOutput): r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The image embeddings obtained by applying the projection layer to the pooler_output. """ image_embeds: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class MetaClip2VisionModelWithProjection(MetaClip2PreTrainedModel): config: MetaClip2VisionConfig main_input_name = "pixel_values" def __init__(self, config: MetaClip2VisionConfig): super().__init__(config) vision_model = MetaClip2VisionModel._from_config(config) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, ) -> MetaClip2VisionModelOutput: r""" Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2VisionModelWithProjection >>> model = MetaClip2VisionModelWithProjection.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/metaclip_2-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> image_embeds = outputs.image_embeds ```""" vision_outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, ) pooled_output = vision_outputs.pooler_output image_embeds = self.visual_projection(pooled_output) return MetaClip2VisionModelOutput( image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions, ) @auto_docstring( custom_intro=""" METACLIP_2 vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of the patch tokens) e.g. for ImageNet. 
""" ) class MetaClip2ForImageClassification(MetaClip2PreTrainedModel): main_input_name = "pixel_values" def __init__(self, config: MetaClip2Config) -> None: super().__init__(config) self.num_labels = config.num_labels vision_model = MetaClip2VisionModel._from_config(config.vision_config) self.vision_model = vision_model.vision_model # Classifier head self.classifier = ( nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> ImageClassifierOutput: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs: BaseModelOutputWithPooling = self.vision_model( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = outputs.last_hidden_state # average pool the patch tokens sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1) # apply classifier logits = self.classifier(sequence_output) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "MetaClip2Model", "MetaClip2PreTrainedModel", "MetaClip2TextModel", "MetaClip2TextModelWithProjection", "MetaClip2VisionModel", "MetaClip2VisionModelWithProjection", "MetaClip2ForImageClassification", ]
transformers/src/transformers/models/metaclip_2/modeling_metaclip_2.py/0
{ "file_path": "transformers/src/transformers/models/metaclip_2/modeling_metaclip_2.py", "repo_id": "transformers", "token_count": 22330 }
528
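# --- Editor's note (illustrative sketch, not part of the original modeling_metaclip_2.py above) ---
# This snippet demonstrates two ideas from the file above: that `_get_vector_norm`
# reproduces `tensor.norm(p=2, dim=-1, keepdim=True)`, and how the symmetric
# contrastive loss is built from a text/image similarity matrix. The helpers are
# re-declared locally so the snippet runs on its own; it mirrors, but is not, the
# shipped implementation.
import torch
import torch.nn as nn

def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor:
    # pow/sum/sqrt decomposition of the L2 norm, as in the file above
    return torch.pow(torch.sum(torch.pow(tensor, 2), dim=-1, keepdim=True), 0.5)

def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # each row's positive target is its own index, i.e. the diagonal of the logits
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))

x = torch.randn(4, 8)
assert torch.allclose(_get_vector_norm(x), x.norm(p=2, dim=-1, keepdim=True), atol=1e-6)

text_embeds = torch.randn(4, 16)
image_embeds = torch.randn(4, 16)
text_embeds = text_embeds / _get_vector_norm(text_embeds)
image_embeds = image_embeds / _get_vector_norm(image_embeds)
logits_per_text = text_embeds @ image_embeds.t()
loss = (contrastive_loss(logits_per_text) + contrastive_loss(logits_per_text.t())) / 2.0
print(loss)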
# coding=utf-8 # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mistral model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class MistralConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate an Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1. [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MistralModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 14336): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`. head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`): The attention head dimension. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Mistral's sliding window attention allows sequence of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention window size. If not specified, will default to `4096`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. ```python >>> from transformers import MistralModel, MistralConfig >>> # Initializing a Mistral 7B style configuration >>> configuration = MistralConfig() >>> # Initializing a model from the Mistral 7B style configuration >>> model = MistralModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mistral" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `MistralModel` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, head_dim=None, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=4096, attention_dropout=0.0, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.head_dim = head_dim # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["MistralConfig"]
transformers/src/transformers/models/mistral/configuration_mistral.py/0
{ "file_path": "transformers/src/transformers/models/mistral/configuration_mistral.py", "repo_id": "transformers", "token_count": 3034 }
529
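# --- Editor's note (illustrative sketch, not part of the original configuration_mistral.py above) ---
# The `num_key_value_heads` docstring above says a multi-head checkpoint can be
# converted to grouped-query attention by mean-pooling the key/value heads in
# each group. The snippet below sketches that operation on a random projection
# weight; the contiguous-head grouping and all shapes are assumptions for
# illustration, not the library's conversion code.
import torch

num_attention_heads = 32
num_key_value_heads = 8
head_dim = 128
hidden_size = num_attention_heads * head_dim

# original k_proj weight: (num_attention_heads * head_dim, hidden_size)
k_proj_weight = torch.randn(num_attention_heads * head_dim, hidden_size)

# group the 32 heads into 8 groups of 4 and mean-pool each group
grouped = k_proj_weight.view(
    num_key_value_heads, num_attention_heads // num_key_value_heads, head_dim, hidden_size
)
k_proj_gqa = grouped.mean(dim=1).reshape(num_key_value_heads * head_dim, hidden_size)
print(k_proj_gqa.shape)  # torch.Size([1024, 4096])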
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import re import requests import torch from PIL import Image from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.grounding_dino.image_processing_grounding_dino import GroundingDinoImageProcessor from transformers.models.grounding_dino.processing_grounding_dino import GroundingDinoProcessor from transformers.models.mm_grounding_dino.configuration_mm_grounding_dino import MMGroundingDinoConfig from transformers.models.mm_grounding_dino.modeling_mm_grounding_dino import MMGroundingDinoForObjectDetection from transformers.models.swin.configuration_swin import SwinConfig MODEL_NAME_TO_CHECKPOINT_URL_MAPPING = { "mm_grounding_dino_tiny_o365v1_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg/grounding_dino_swin-t_pretrain_obj365_goldg_20231122_132602-4ea751ce.pth", "mm_grounding_dino_tiny_o365v1_goldg_grit": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_20231128_200818-169cc352.pth", "mm_grounding_dino_tiny_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_v3det_20231218_095741-e316e297.pth", "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth", "mm_grounding_dino_base_o365v1_goldg_v3det": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_obj365_goldg_v3det/grounding_dino_swin-b_pretrain_obj365_goldg_v3de-f83eef00.pth", "mm_grounding_dino_base_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-b_pretrain_all/grounding_dino_swin-b_pretrain_all-f9818a7c.pth", "mm_grounding_dino_large_o365v2_oiv6_goldg": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_obj365_goldg/grounding_dino_swin-l_pretrain_obj365_goldg-34dcdc53.pth", "mm_grounding_dino_large_all": "https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-l_pretrain_all/grounding_dino_swin-l_pretrain_all-56d69e78.pth", "llmdet_tiny": "https://huggingface.co/fushh7/LLMDet/resolve/main/tiny.pth?download=true", "llmdet_base": "https://huggingface.co/fushh7/LLMDet/resolve/main/base.pth?download=true", "llmdet_large": "https://huggingface.co/fushh7/LLMDet/resolve/main/large.pth?download=true", } MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING = { "mm_grounding_dino_tiny_o365v1_goldg": { "scores": torch.tensor([0.7722, 0.7584, 0.7984, 0.7163]), "boxes": torch.tensor( [ [0.5212, 0.1594, 0.5792, 0.3895], [0.5424, 0.0513, 0.9996, 
0.7757], [0.0629, 0.1526, 0.2746, 0.2447], [0.0091, 0.1127, 0.4945, 0.9911], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_grit": { "scores": torch.tensor([0.7865, 0.7180, 0.7665, 0.8177]), "boxes": torch.tensor( [ [0.0084, 0.1129, 0.4940, 0.9895], [0.5214, 0.1597, 0.5786, 0.3875], [0.5413, 0.0507, 0.9998, 0.7768], [0.0631, 0.1527, 0.2740, 0.2449], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_v3det": { "scores": torch.tensor([0.5690, 0.5553, 0.6075, 0.5775]), "boxes": torch.tensor( [ [0.5393, 0.0502, 0.9989, 0.7763], [0.0090, 0.1125, 0.4950, 0.9895], [0.5207, 0.1589, 0.5794, 0.3889], [0.0625, 0.1519, 0.2750, 0.2446], ] ), }, "mm_grounding_dino_tiny_o365v1_goldg_grit_v3det": { "scores": torch.tensor([0.8381, 0.8204, 0.7970, 0.7175]), "boxes": torch.tensor( [ [0.0099, 0.1129, 0.4942, 0.9903], [0.5413, 0.0506, 0.9998, 0.7753], [0.0626, 0.1527, 0.2744, 0.2443], [0.5211, 0.1596, 0.5790, 0.3890], ] ), }, "mm_grounding_dino_base_o365v1_goldg_v3det": { "scores": torch.tensor([0.8418, 0.8364, 0.8342, 0.7885]), "boxes": torch.tensor( [ [0.5427, 0.0502, 0.9996, 0.7770], [0.0628, 0.1529, 0.2747, 0.2448], [0.0085, 0.1132, 0.4947, 0.9898], [0.5208, 0.1597, 0.5787, 0.3910], ] ), }, "mm_grounding_dino_base_all": { "scores": torch.tensor([0.4713]), "boxes": torch.tensor([[0.5423, 0.0507, 0.9998, 0.7761]]), }, "mm_grounding_dino_large_o365v2_oiv6_goldg": { "scores": torch.tensor([0.7824, 0.8275, 0.7715, 0.8211]), "boxes": torch.tensor( [ [0.0082, 0.1133, 0.4945, 0.9889], [0.5410, 0.0508, 0.9998, 0.7771], [0.0632, 0.1526, 0.2740, 0.2439], [0.5205, 0.1599, 0.5787, 0.3906], ] ), }, "mm_grounding_dino_large_all": { "scores": torch.tensor([0.7373, 0.6208, 0.6913, 0.4523]), "boxes": torch.tensor( [ [0.5424, 0.0509, 0.9997, 0.7765], [0.0632, 0.1529, 0.2744, 0.2447], [0.0121, 0.1125, 0.4947, 0.9884], [0.5206, 0.1597, 0.5789, 0.3933], ] ), }, "llmdet_tiny": { "scores": torch.tensor([0.7262, 0.7552, 0.7656, 0.8207]), "boxes": torch.tensor( [ [0.0114, 0.1132, 0.4947, 0.9854], [0.5387, 0.0513, 0.9992, 0.7765], [0.5212, 0.1605, 0.5788, 0.3890], [0.0634, 0.1536, 0.2743, 0.2440], ] ), }, "llmdet_base": { "scores": torch.tensor([0.8646, 0.7567, 0.6978, 0.8084]), "boxes": torch.tensor( [ [0.0632, 0.1529, 0.2745, 0.2438], [0.5420, 0.0512, 0.9989, 0.7774], [0.0110, 0.1134, 0.4950, 0.9875], [0.5209, 0.1602, 0.5789, 0.3908], ] ), }, "llmdet_large": { "scores": torch.tensor([0.7107, 0.8626, 0.7458, 0.8166]), "boxes": torch.tensor( [ [0.0147, 0.1128, 0.4957, 0.9858], [0.0634, 0.1528, 0.2744, 0.2447], [0.5414, 0.0511, 0.9997, 0.7776], [0.5209, 0.1602, 0.5792, 0.3916], ] ), }, } # fmt: off ORIGINAL_TO_CONVERTED_KEY_MAPPING = { # vision backbone r"backbone.patch_embed.projection.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.\1", r"backbone.patch_embed.norm.(weight|bias)": r"model.backbone.conv_encoder.model.embeddings.norm.\1", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(relative_position_bias_table|relative_position_index)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3", r"backbone.stages.(\d+).blocks.(\d+).norm1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_before.\3", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.(query|key|value).(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.self.\3.\4", r"backbone.stages.(\d+).blocks.(\d+).attn.w_msa.proj.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.attention.output.dense.\3", 
r"backbone.stages.(\d+).blocks.(\d+).norm2.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.layernorm_after.\3", r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.0.0.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.intermediate.dense.\3", r"backbone.stages.(\d+).blocks.(\d+).ffn.layers.1.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.blocks.\2.output.dense.\3", r"backbone.stages.(\d+).downsample.reduction.weight": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.reduction.weight", r"backbone.stages.(\d+).downsample.norm.(weight|bias)": r"model.backbone.conv_encoder.model.encoder.layers.\1.downsample.norm.\2", r"backbone.norms.(\d+).(weight|bias)": r"model.backbone.conv_encoder.model.hidden_states_norms.stage\1.\2", r"neck.convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2", r"neck.convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2", r"neck.extra_convs.(\d+).conv.(weight|bias)": r"model.input_proj_vision.\1.0.\2", r"neck.extra_convs.(\d+).gn.(weight|bias)": r"model.input_proj_vision.\1.1.\2", # text backbone r"language_model.language_backbone.body.model.(.*)": r"model.text_backbone.\1", r"text_feat_map.(weight|bias)": r"model.text_projection.\1", # encoder r"encoder.fusion_layers.(\d+).gamma_v": r"model.encoder.layers.\1.fusion_layer.vision_param", r"encoder.fusion_layers.(\d+).gamma_l": r"model.encoder.layers.\1.fusion_layer.text_param", r"encoder.fusion_layers.(\d+).layer_norm_v.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_vision.\2", r"encoder.fusion_layers.(\d+).attn.v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.vision_proj.\2", r"encoder.fusion_layers.(\d+).attn.values_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_vision_proj.\2", r"encoder.fusion_layers.(\d+).attn.out_v_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_vision_proj.\2", r"encoder.fusion_layers.(\d+).layer_norm_l.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.layer_norm_text.\2", r"encoder.fusion_layers.(\d+).attn.l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.text_proj.\2", r"encoder.fusion_layers.(\d+).attn.values_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.values_text_proj.\2", r"encoder.fusion_layers.(\d+).attn.out_l_proj.(weight|bias)": r"model.encoder.layers.\1.fusion_layer.attn.out_text_proj.\2", r"encoder.layers.(\d+).self_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn.\2.\3", r"encoder.layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.self_attn_layer_norm.\2", r"encoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc1.\2", r"encoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.fc2.\2", r"encoder.layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.deformable_layer.final_layer_norm.\2", r"encoder.text_layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.\2.\3", r"encoder.text_layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.self_attn.out_proj.\2", r"encoder.text_layers.(\d+).norms.0.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_before.\2", r"encoder.text_layers.(\d+).ffn.layers.0.0.(weight|bias)": 
r"model.encoder.layers.\1.text_enhancer_layer.fc1.\2", r"encoder.text_layers.(\d+).ffn.layers.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.fc2.\2", r"encoder.text_layers.(\d+).norms.1.(weight|bias)": r"model.encoder.layers.\1.text_enhancer_layer.layer_norm_after.\2", r"encoder.bbox_head.cls_branch.bias": r"model.encoder_output_class_embed.bias", r"encoder.bbox_head.reg_branch.0.(weight|bias)": r"model.encoder_output_bbox_embed.layers.0.\1", r"encoder.bbox_head.reg_branch.2.(weight|bias)": r"model.encoder_output_bbox_embed.layers.1.\1", r"encoder.bbox_head.reg_branch.4.(weight|bias)": r"model.encoder_output_bbox_embed.layers.2.\1", # decoder r"decoder.norm.(weight|bias)": r"model.decoder.layer_norm.\1", r"decoder.ref_point_head.layers.(\d+).(weight|bias)": r"model.decoder.reference_points_head.layers.\1.\2", r"decoder.layers.(\d+).self_attn.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.self_attn.\2.\3", r"decoder.layers.(\d+).self_attn.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.self_attn.out_proj.\2", r"decoder.layers.(\d+).norms.0.(weight|bias)": r"model.decoder.layers.\1.self_attn_layer_norm.\2", r"decoder.layers.(\d+).cross_attn_text.attn.(query|key|value)_proj_(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.\2.\3", r"decoder.layers.(\d+).cross_attn_text.attn.out_proj.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text.out_proj.\2", r"decoder.layers.(\d+).norms.1.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_text_layer_norm.\2", r"decoder.layers.(\d+).cross_attn.(sampling_offsets|attention_weights|value_proj|output_proj).(weight|bias)": r"model.decoder.layers.\1.encoder_attn.\2.\3", r"decoder.layers.(\d+).norms.2.(weight|bias)": r"model.decoder.layers.\1.encoder_attn_layer_norm.\2", r"decoder.layers.(\d+).ffn.layers.0.0.(weight|bias)": r"model.decoder.layers.\1.fc1.\2", r"decoder.layers.(\d+).ffn.layers.1.(weight|bias)": r"model.decoder.layers.\1.fc2.\2", r"decoder.layers.(\d+).norms.3.(weight|bias)": r"model.decoder.layers.\1.final_layer_norm.\2", r"decoder.bbox_head.cls_branches.(\d+).bias": r"model.decoder.class_embed.\1.bias", r"decoder.bbox_head.reg_branches.(\d+).0.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.0.\2", r"decoder.bbox_head.reg_branches.(\d+).2.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.1.\2", r"decoder.bbox_head.reg_branches.(\d+).4.(weight|bias)": r"model.decoder.bbox_embed.\1.layers.2.\2", # other r"level_embed": r"model.level_embed", r"query_embedding.weight": r"model.query_position_embeddings.weight", r"memory_trans_fc.(weight|bias)": r"model.enc_output.\1", r"memory_trans_norm.(weight|bias)": r"model.enc_output_norm.\1", r"bbox_head.cls_branches.(\d+).bias": r"class_embed.\1.bias", r"bbox_head.reg_branches.(\d+).0.(weight|bias)": r"bbox_embed.\1.layers.0.\2", r"bbox_head.reg_branches.(\d+).2.(weight|bias)": r"bbox_embed.\1.layers.1.\2", r"bbox_head.reg_branches.(\d+).4.(weight|bias)": r"bbox_embed.\1.layers.2.\2", } # fmt: on def get_mm_grounding_dino_config(model_name: str) -> MMGroundingDinoConfig: if "tiny" in model_name: swin_image_size = 224 swin_window_size = 7 swin_embed_dim = 96 swin_depths = (2, 2, 6, 2) swin_num_heads = (3, 6, 12, 24) swin_out_features = ["stage2", "stage3", "stage4"] num_feature_levels = 4 elif "base" in model_name: swin_image_size = 384 swin_window_size = 12 swin_embed_dim = 128 swin_depths = (2, 2, 18, 2) swin_num_heads = (4, 8, 16, 32) swin_out_features = ["stage2", "stage3", "stage4"] num_feature_levels = 4 elif "large" in 
model_name: swin_image_size = 384 swin_window_size = 12 swin_embed_dim = 192 swin_depths = (2, 2, 18, 2) swin_num_heads = (6, 12, 24, 48) swin_out_features = ["stage1", "stage2", "stage3", "stage4"] num_feature_levels = 5 else: raise ValueError( f"Model name: {model_name} is not supported. Only `tiny`, `base` and `large` models are currently supported." ) backbone_config = SwinConfig( image_size=swin_image_size, window_size=swin_window_size, embed_dim=swin_embed_dim, depths=swin_depths, num_heads=swin_num_heads, out_features=swin_out_features, ) model_config = MMGroundingDinoConfig( backbone_config=backbone_config, num_feature_levels=num_feature_levels, ) return model_config def get_mm_grounding_dino_processor() -> GroundingDinoProcessor: img_processor = GroundingDinoImageProcessor() txt_processor = BertTokenizer.from_pretrained("bert-base-uncased") processor = GroundingDinoProcessor(img_processor, txt_processor) return processor # Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L8C1-L13C13 def correct_unfold_reduction_order(x: torch.Tensor) -> torch.Tensor: out_channel, in_channel = x.shape x = x.reshape(out_channel, in_channel // 4, 4).transpose(1, 2) x = x[:, [0, 2, 1, 3], :] x = x.reshape(out_channel, in_channel) return x # Copied from: https://github.com/iSEE-Laboratory/LLMDet/blob/96ec8c82a9d97b170db759e043afd5b81445d0f1/hf_model/mmdet2groundingdino_swint.py#L15C1-L20C13 def correct_unfold_norm_order(x: torch.Tensor) -> torch.Tensor: in_channel = x.shape[0] x = x.reshape(in_channel // 4, 4).transpose(0, 1) x = x[[0, 2, 1, 3], :] x = x.reshape(in_channel) return x def preprocess_old_state(state_dict: dict, config: MMGroundingDinoConfig) -> dict: """ Preprocesses old state dict to enable 1-1 mapping: - split qkv projections in Swin backbone - reorder reduction and norm parameters in Swin backbone - shift output norm indices in Swin backbone - shift output proj indices in neck - split q,k,v projections in text self and cross attentions in encoder and decoder - duplicate detection head parameters for decoder and encoder """ new_state_dict = state_dict.copy() for k in state_dict: if k.startswith("backbone"): if "downsample.reduction" in k: new_state_dict[k] = correct_unfold_reduction_order(new_state_dict.pop(k)) elif "downsample.norm" in k: new_state_dict[k] = correct_unfold_norm_order(new_state_dict.pop(k)) elif "w_msa.qkv" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("qkv", "query")] = q_param new_state_dict[k.replace("qkv", "key")] = k_param new_state_dict[k.replace("qkv", "value")] = v_param elif "backbone.norm" in k: match = re.match(r"backbone.norm(\d+).(weight|bias)", k) new_state_dict[f"backbone.norms.{int(match.group(1)) + 1}.{match.group(2)}"] = new_state_dict.pop(k) elif k.startswith("neck.extra_convs"): num_normal_convs = len(config.backbone_config.out_indices) if "gn" in k: match = re.match(r"neck.extra_convs.(\d+).gn.(weight|bias)", k) new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.gn.{match.group(2)}"] = ( new_state_dict.pop(k) ) elif "conv" in k: match = re.match(r"neck.extra_convs.(\d+).conv.(weight|bias)", k) new_state_dict[f"neck.extra_convs.{num_normal_convs + int(match.group(1))}.conv.{match.group(2)}"] = ( new_state_dict.pop(k) ) elif k.startswith("encoder"): if "self_attn.attn.in_proj" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("in", "query")] = q_param 
new_state_dict[k.replace("in", "key")] = k_param new_state_dict[k.replace("in", "value")] = v_param elif k.startswith("decoder"): if "self_attn.attn.in_proj" in k or "cross_attn_text.attn.in_proj" in k: q_param, k_param, v_param = new_state_dict.pop(k).chunk(3) new_state_dict[k.replace("in", "query")] = q_param new_state_dict[k.replace("in", "key")] = k_param new_state_dict[k.replace("in", "value")] = v_param elif k.startswith("bbox_head"): num_decoder_layers = config.decoder_layers match = re.match(r"bbox_head.(cls|reg)_branches.(\d+).(.*)", k) cls_or_reg = match.group(1) layer_idx = int(match.group(2)) suffix = match.group(3) if layer_idx < num_decoder_layers: new_key = f"decoder.bbox_head.{cls_or_reg}_branches.{layer_idx}.{suffix}" new_state_dict[new_key] = new_state_dict[k] # copy else: new_key = f"encoder.bbox_head.{cls_or_reg}_branch.{suffix}" new_state_dict[new_key] = new_state_dict.pop(k) # move # remove unused params if ( k == "dn_query_generator.label_embedding.weight" or k == "language_model.language_backbone.body.model.embeddings.position_ids" or k == "image_seperate.weight" or k.startswith("lmm") or k.startswith("connector") or k.startswith("region_connector") or k.startswith("ref_point_head") ): new_state_dict.pop(k) return new_state_dict # Copied from transformers/models/siglip2/convert_siglip2_to_hf.py def convert_old_keys_to_new_keys(state_dict_keys: list) -> dict: """ This function should be applied only once, on the concatenated keys to efficiently rename using the key mappings. """ output_dict = {} if state_dict_keys is not None: old_text = "\n".join(state_dict_keys) new_text = old_text for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items(): if replacement is None: new_text = re.sub(pattern, "", new_text) # an empty line continue new_text = re.sub(pattern, replacement, new_text) output_dict = dict(zip(old_text.split("\n"), new_text.split("\n"))) return output_dict def convert_mm_to_hf_state(original_state: dict, hf_cfg: MMGroundingDinoConfig) -> dict: original_state = preprocess_old_state(original_state, hf_cfg) original_state_keys = list(original_state.keys()) original_to_hf_key_map = convert_old_keys_to_new_keys(original_state_keys) hf_state = {} for original_key in original_state_keys: hf_key = original_to_hf_key_map[original_key] hf_state[hf_key] = original_state.pop(original_key) return hf_state def prepare_test_inputs(): image_url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(image_url, stream=True).raw) text = [["cat", "remote"]] return image, text @torch.no_grad() def convert_mm_grounding_dino_checkpoint( model_name: str, verify_outputs: bool, push_to_hub: bool, hub_user_name: str, ) -> tuple[MMGroundingDinoConfig, dict]: # Load original state checkpoint_url = MODEL_NAME_TO_CHECKPOINT_URL_MAPPING[model_name] print(f"Loading checkpoint from: {checkpoint_url}") ckpt = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") mm_state = ckpt["state_dict"] # Create hf model and processor print("Creating model...") hf_cfg = get_mm_grounding_dino_config(model_name) hf_state = convert_mm_to_hf_state(mm_state, hf_cfg) hf_model = MMGroundingDinoForObjectDetection(hf_cfg).eval() hf_model.load_state_dict(hf_state) hf_processor = get_mm_grounding_dino_processor() # Verify outputs if needed if verify_outputs: print("Running inference to verify outputs...") image, text = prepare_test_inputs() model_inputs = hf_processor(images=image, text=text, return_tensors="pt") model_outputs = 
hf_model(**model_inputs) results = hf_processor.post_process_grounded_object_detection( model_outputs, model_inputs.input_ids, box_threshold=0.4, text_threshold=0.3, ) result = results[0] print(result) expected = MODEL_NAME_TO_EXPECTED_OUTPUT_MAPPING[model_name] for key in expected: torch.testing.assert_close(result[key], expected[key], atol=1e-3, rtol=1e-3) print("Outputs match.") # Push to hub if needed if push_to_hub: print("Pushing to hub...") hub_url = f"{hub_user_name}/{model_name}" hf_model.push_to_hub(hub_url) hf_processor.push_to_hub(hub_url) print(f"Pushed to huggingface hub at: {hub_url}.") return hf_cfg, hf_state def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model-name", required=True, type=str, choices=list(MODEL_NAME_TO_CHECKPOINT_URL_MAPPING.keys()), help="URL to the original mm grounding dino checkpoint.", ) parser.add_argument("--hub-user-name", type=str, help="User name on the huggingface hub.") parser.add_argument("--push-to-hub", action="store_true", help="Whether to push model to hub or not.") parser.add_argument( "--verify-outputs", action="store_true", help="Whether to verify that model output is correct or not." ) return parser.parse_args() if __name__ == "__main__": args = parse_args() convert_mm_grounding_dino_checkpoint( args.model_name, args.verify_outputs, args.push_to_hub, args.hub_user_name, )
transformers/src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/mm_grounding_dino/convert_mm_grounding_dino_to_hf.py", "repo_id": "transformers", "token_count": 16889 }
530
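# --- Editor's note (illustrative sketch, not part of the original convert_mm_grounding_dino_to_hf.py above) ---
# A minimal demonstration of the bulk renaming trick used by
# `convert_old_keys_to_new_keys` above: all state-dict keys are joined into one
# newline-separated string, every regex mapping is applied to that string, and
# the old/new keys are zipped back together. The two mapping entries are copied
# from ORIGINAL_TO_CONVERTED_KEY_MAPPING; the toy key list is made up.
import re

mapping = {
    r"backbone.patch_embed.projection.(weight|bias)":
        r"model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.\1",
    r"level_embed": r"model.level_embed",
}

old_keys = ["backbone.patch_embed.projection.weight", "level_embed"]
text = "\n".join(old_keys)
for pattern, replacement in mapping.items():
    text = re.sub(pattern, replacement, text)
renamed = dict(zip(old_keys, text.split("\n")))
print(renamed)
# {'backbone.patch_embed.projection.weight':
#  'model.backbone.conv_encoder.model.embeddings.patch_embeddings.projection.weight',
#  'level_embed': 'model.level_embed'}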
# coding=utf-8 # Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MobileNetV1 model.""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_mobilenet_v1 import MobileNetV1Config logger = logging.get_logger(__name__) def _build_tf_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. """ tf_to_pt_map = {} if isinstance(model, MobileNetV1ForImageClassification): backbone = model.mobilenet_v1 else: backbone = model prefix = "MobilenetV1/Conv2d_0/" tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var for i in range(13): tf_index = i + 1 pt_index = i * 2 pointer = backbone.layer[pt_index] prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/" tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var pointer = backbone.layer[pt_index + 1] prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/" tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var if isinstance(model, MobileNetV1ForImageClassification): prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/" tf_to_pt_map[prefix + "weights"] = model.classifier.weight tf_to_pt_map[prefix + "biases"] = model.classifier.bias return tf_to_pt_map def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path): """Load TensorFlow checkpoints in a PyTorch model.""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_checkpoint_path) tf_weights = {} for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_checkpoint_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info(f"Importing {name}") if name not in tf_weights: logger.info(f"{name} not in tf pre-trained weights, skipping") continue array = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise") array = np.transpose(array, (2, 3, 0, 1)) elif "weights" in name: logger.info("Transposing") if len(pointer.shape) == 2: # copying into linear layer array = array.squeeze().transpose() else: array = np.transpose(array, (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") logger.info(f"Initialize PyTorch weight {name} {array.shape}") pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/RMSProp", None) tf_weights.pop(name + "/RMSProp_1", None) tf_weights.pop(name + "/ExponentialMovingAverage", None) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}") return model def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor: """ Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at: https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2 """ in_height, in_width = features.shape[-2:] stride_height, stride_width = conv_layer.stride kernel_height, kernel_width = conv_layer.kernel_size if in_height % stride_height == 0: pad_along_height = max(kernel_height - stride_height, 0) else: pad_along_height = max(kernel_height - (in_height % stride_height), 0) if in_width % stride_width == 0: pad_along_width = max(kernel_width - stride_width, 0) else: pad_along_width = max(kernel_width - (in_width % stride_width), 0) pad_left = pad_along_width // 2 pad_right = pad_along_width - pad_left pad_top = pad_along_height // 2 pad_bottom = pad_along_height - pad_top padding = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(features, padding, "constant", 0.0) class MobileNetV1ConvLayer(nn.Module): def __init__( self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True, ) -> None: super().__init__() self.config = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") padding = 0 if config.tf_padding else int((kernel_size - 1) / 2) self.convolution = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", ) if use_normalization: self.normalization = nn.BatchNorm2d( num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, ) else: self.normalization = None if use_activation: if isinstance(use_activation, str): self.activation = ACT2FN[use_activation] elif isinstance(config.hidden_act, str): 
self.activation = ACT2FN[config.hidden_act] else: self.activation = config.hidden_act else: self.activation = None def forward(self, features: torch.Tensor) -> torch.Tensor: if self.config.tf_padding: features = apply_tf_padding(features, self.convolution) features = self.convolution(features) if self.normalization is not None: features = self.normalization(features) if self.activation is not None: features = self.activation(features) return features @auto_docstring class MobileNetV1PreTrainedModel(PreTrainedModel): config: MobileNetV1Config load_tf_weights = load_tf_weights_in_mobilenet_v1 base_model_prefix = "mobilenet_v1" main_input_name = "pixel_values" supports_gradient_checkpointing = False _no_split_modules = [] def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.BatchNorm2d): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class MobileNetV1Model(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config depth = 32 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.conv_stem = MobileNetV1ConvLayer( config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, ) strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] self.layer = nn.ModuleList() for i in range(13): in_channels = out_channels if strides[i] == 2 or i == 0: depth *= 2 out_channels = max(int(depth * config.depth_multiplier), config.min_depth) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ) ) self.layer.append( MobileNetV1ConvLayer( config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) ) self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): raise NotImplementedError @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.conv_stem(pixel_values) all_hidden_states = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) last_hidden_state = hidden_states if self.pooler is not None: pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1) else: pooled_output = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, ) 
@auto_docstring( custom_intro=""" MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """ ) class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel): def __init__(self, config: MobileNetV1Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.mobilenet_v1 = MobileNetV1Model(config) last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels # Classifier head self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True) self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(self.dropout(pooled_output)) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) __all__ = [ "MobileNetV1ForImageClassification", "MobileNetV1Model", "MobileNetV1PreTrainedModel", "load_tf_weights_in_mobilenet_v1", ]
transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py/0
{ "file_path": "transformers/src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py", "repo_id": "transformers", "token_count": 7193 }
531
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Optional, Union import torch import torch.nn as nn from transformers.utils.generic import OutputRecorder, check_model_inputs from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...configuration_utils import PretrainedConfig from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_rope_utils import rope_config_validation from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.deprecation import deprecate_kwarg from ..glm.modeling_glm import GlmAttention, GlmRotaryEmbedding, apply_rotary_pos_emb from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward from ..whisper.modeling_whisper import WhisperModel, shift_tokens_right logger = logging.get_logger(__name__) class MoonshineConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Moonshine [UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32768): Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MoonshineModel`]. hidden_size (`int`, *optional*, defaults to 288): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 1152): Dimension of the MLP representations. encoder_num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. decoder_num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer decoder. encoder_num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. 
encoder_num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if `encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. decoder_num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if `decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `decoder_num_attention_heads`. pad_head_dim_to_multiple_of (`int`, *optional*): Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain optimized attention implementations. encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_start_token_id (`int`, *optional*, defaults to 1): Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids` are provided to the `generate` function. It is used to guide the model`s generation process depending on the task. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. 
`attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE partial_rotary_factor (`float`, *optional*, defaults to 0.9): Percentage of the query and keys which will have rotary embedding. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder/decoder or not. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. bos_token_id (`int`, *optional*, defaults to 1): Denotes beginning of sequences token id. eos_token_id (`int`, *optional*, defaults to 2): Denotes end of sequences token id. 
Example: ```python >>> from transformers import MoonshineModel, MoonshineConfig >>> # Initializing a Moonshine style configuration >>> configuration = MoonshineConfig().from_pretrained("UsefulSensors/moonshine-tiny") >>> # Initializing a model from the configuration >>> model = MoonshineModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "moonshine" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_key_value_heads": "encoder_num_key_value_heads", "num_attention_heads": "encoder_num_attention_heads", "num_hidden_layers": "encoder_num_hidden_layers", } def __init__( self, vocab_size=32768, hidden_size=288, intermediate_size=1152, encoder_num_hidden_layers=6, decoder_num_hidden_layers=6, encoder_num_attention_heads=8, decoder_num_attention_heads=8, encoder_num_key_value_heads=None, decoder_num_key_value_heads=None, pad_head_dim_to_multiple_of=None, encoder_hidden_act="gelu", decoder_hidden_act="silu", max_position_embeddings=512, initializer_range=0.02, decoder_start_token_id=1, use_cache=True, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=0.9, is_encoder_decoder=True, attention_bias=False, attention_dropout=0.0, bos_token_id=1, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.encoder_num_hidden_layers = encoder_num_hidden_layers self.decoder_num_hidden_layers = decoder_num_hidden_layers self.encoder_num_attention_heads = encoder_num_attention_heads self.decoder_num_attention_heads = decoder_num_attention_heads if encoder_num_key_value_heads is None: encoder_num_key_value_heads = encoder_num_attention_heads self.encoder_num_key_value_heads = encoder_num_key_value_heads if decoder_num_key_value_heads is None: decoder_num_key_value_heads = decoder_num_attention_heads self.decoder_num_key_value_heads = decoder_num_key_value_heads self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of self.encoder_hidden_act = encoder_hidden_act self.decoder_hidden_act = decoder_hidden_act self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.partial_rotary_factor = partial_rotary_factor self.is_encoder_decoder = is_encoder_decoder self.attention_bias = attention_bias self.attention_dropout = attention_dropout # Validate the correctness of rotary position embeddings parameters rope_config_validation(self) super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) class MoonshineEncoderMLP(nn.Module): def __init__(self, config, hidden_act): super().__init__() self.config = config self.activation_fn = ACT2FN[hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class MoonshineDecoderMLP(nn.Module): def __init__(self, config, hidden_act): super().__init__() self.config = config self.activation_fn = ACT2FN[hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2) self.fc2 = 
nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states, gate = hidden_states.chunk(2, dim=-1) hidden_states = self.activation_fn(gate) * hidden_states hidden_states = self.fc2(hidden_states) return hidden_states class MoonshineAttention(GlmAttention): def __init__( self, config: MoonshineConfig, layer_idx: int, is_causal: bool, num_attention_heads: int, num_key_value_heads: int, ): config.update({"num_attention_heads": num_attention_heads, "num_key_value_heads": num_key_value_heads}) super().__init__(config, layer_idx) self.is_causal = is_causal self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) # Pad head dimension to the next specified multiple. if self.config.pad_head_dim_to_multiple_of is not None: target_multiple = self.config.pad_head_dim_to_multiple_of target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple) self.head_dim_padding = target_head_dim - self.head_dim else: self.head_dim_padding = 0 @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, key_value_states: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len = hidden_states.shape[:-1] query_states = ( self.q_proj(hidden_states).view(bsz, q_len, self.config.num_key_value_heads, self.head_dim).transpose(1, 2) ) is_cross_attention = key_value_states is not None if past_key_values is not None: is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache past_key_values.is_updated[self.layer_idx] = True past_key_values = past_key_values.cross_attention_cache else: past_key_values = past_key_values.self_attention_cache # use key_value_states if cross attention current_states = key_value_states if key_value_states is not None else hidden_states if is_cross_attention and past_key_values and is_updated: key_states = past_key_values.layers[self.layer_idx].keys value_states = past_key_values.layers[self.layer_idx].values else: key_states = ( self.k_proj(current_states) .view(bsz, -1, self.config.num_key_value_heads, self.head_dim) .transpose(1, 2) ) value_states = ( self.v_proj(current_states) .view(bsz, -1, self.config.num_key_value_heads, self.head_dim) .transpose(1, 2) ) if is_cross_attention and past_key_values is not None: key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) if not is_cross_attention: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, cache_kwargs ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] is_causal = self.is_causal and attention_mask is None 
and q_len > 1 if self.head_dim_padding > 0: query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding)) key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding)) value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding)) attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, is_causal=is_causal, **kwargs, ) if self.head_dim_padding > 0: attn_output = attn_output[..., : -self.head_dim_padding] attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class MoonshineRotaryEmbedding(GlmRotaryEmbedding): pass class MoonshineEncoderLayer(LlamaDecoderLayer): def __init__(self, config: MoonshineConfig, layer_idx: int): super().__init__(config, layer_idx) self.self_attn = MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.encoder_num_attention_heads, num_key_value_heads=config.encoder_num_key_value_heads, ) self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act) self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False) class MoonshineDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MoonshineConfig, layer_idx: Optional[int] = None): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=True, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads, ) self.encoder_attn = MoonshineAttention( config=config, layer_idx=layer_idx, is_causal=False, num_attention_heads=config.decoder_num_attention_heads, num_key_value_heads=config.decoder_num_key_value_heads, ) self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act) self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False) self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, encoder_position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states, _ = 
self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring class MoonshinePreTrainedModel(PreTrainedModel): config: MoonshineConfig base_model_prefix = "model" main_input_name = "input_values" supports_gradient_checkpointing = True _no_split_modules = ["MoonshineEncoderLayer", "MoonshineDecoderLayer"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True # TODO arthur, how do we separate when it cross / self coming from different layer? def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers """ output_conv1_length = int((input_lengths - 127) / 64 + 1) output_conv2_length = int((output_conv1_length - 7) / 3 + 1) output_conv3_length = int((output_conv2_length - 3) / 2 + 1) return output_conv3_length class MoonshineEncoder(MoonshinePreTrainedModel): """ Transformer encoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoonshineEncoderLayer`] Args: config: MoonshineConfig """ main_input_name = "input_values" _can_record_outputs = { "attentions": MoonshineAttention, "hidden_states": MoonshineEncoderLayer, } def __init__(self, config: MoonshineConfig): super().__init__(config) self.config = config embed_dim = config.hidden_size self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False) self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3) self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2) self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-5) self.rotary_emb = MoonshineRotaryEmbedding(config=config) self.layers = nn.ModuleList( [MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)] ) self.layer_norm = nn.LayerNorm(embed_dim, bias=False) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self) -> nn.Module: return self.conv1 def set_input_embeddings(self, value: nn.Module): self.conv1 = value @check_model_inputs def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) """ input_values = input_values.unsqueeze(1) hidden_states = nn.functional.tanh(self.conv1(input_values)) hidden_states = self.groupnorm(hidden_states) hidden_states = nn.functional.gelu(self.conv2(hidden_states)) hidden_states = nn.functional.gelu(self.conv3(hidden_states)) hidden_states = hidden_states.permute(0, 2, 1) # attention mask downsampling if attention_mask is not None: mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1]) downsample_stride = 64 * 3 * 2 # conv strides attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len] if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if (attention_mask == 0.0).any() else None elif self.config._attn_implementation == "sdpa": attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype) else: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0) position_embeddings = self.rotary_emb(hidden_states, position_ids) for encoder_layer in self.layers: hidden_states = encoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.layer_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, ) class MoonshineDecoder(LlamaModel): main_input_name = "input_ids" _can_record_outputs = { "attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="self_attn"), "hidden_states": MoonshineDecoderLayer, "cross_attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="encoder_attn"), } def __init__(self, config: MoonshineConfig): super().__init__(config) self.norm = nn.LayerNorm(config.hidden_size, bias=False) self.layers = nn.ModuleList( [MoonshineDecoderLayer(config, idx) for idx in range(config.decoder_num_hidden_layers)] ) @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) if encoder_attention_mask is not None: mask_len = encoder_hidden_states.shape[-2] downsample_stride = 64 * 3 * 2 # conv strides encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len] if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None elif self.config._attn_implementation == "sdpa": encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2] ) else: encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, hidden_states.dtype, hidden_states.shape[-2] ) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, causal_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class MoonshineModel(WhisperModel): @can_return_tuple @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[tuple[torch.LongTensor]] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Seq2SeqModelOutput: r""" input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. 
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`): Indices of positions of each input sequence tokens in the position embeddings. Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings` Example: ```python >>> import torch >>> from transformers import AutoFeatureExtractor, MoonshineModel >>> from datasets import load_dataset >>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_values = inputs.input_values >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 288] ``` """ if encoder_outputs is None: encoder_outputs: BaseModelOutput = self.encoder(input_values, attention_mask=attention_mask, **kwargs) decoder_outputs: BaseModelOutputWithPastAndCrossAttentions = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_attention_mask=attention_mask, encoder_hidden_states=encoder_outputs.last_hidden_state, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs, ) return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The Moonshine Model with a language modeling head. Can be used for automatic speech recognition. 
""" ) class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin): _tied_weights_keys = ["proj_out.weight"] def __init__(self, config: MoonshineConfig): super().__init__(config) self.model = MoonshineModel(config) self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def get_output_embeddings(self): return self.proj_out def set_output_embeddings(self, new_embeddings): self.proj_out = new_embeddings def get_input_embeddings(self) -> nn.Module: return self.model.get_input_embeddings() @can_return_tuple @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None, decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None, decoder_position_ids: Optional[tuple[torch.LongTensor]] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Seq2SeqLMOutput: r""" input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`): Float values of the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoFeatureExtractor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`): Indices of positions of each input sequence tokens in the position embeddings. Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings` Example: ```python >>> import torch >>> from transformers import AutoProcessor, MoonshineForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny") >>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_values = inputs.input_values >>> generated_ids = model.generate(input_values, max_new_tokens=100) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription 'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
```""" if labels is not None: if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs: Seq2SeqModelOutput = self.model( input_values, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_inputs_embeds=decoder_inputs_embeds, decoder_position_ids=decoder_position_ids, use_cache=use_cache, cache_position=cache_position, **kwargs, ) logits = self.proj_out(outputs.last_hidden_state) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size) return Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) __all__ = [ "MoonshineConfig", "MoonshineModel", "MoonshinePreTrainedModel", "MoonshineForConditionalGeneration", ]
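# --- Illustrative sketch (not part of the original file) ---
# MoonshineEncoder downsamples raw audio with three convolutions
# (kernel/stride 127/64, 7/3 and 3/2), which is also why the attention-mask
# downsampling in the encoder and decoder uses a stride of 64 * 3 * 2 = 384.
# A minimal standalone sketch of the length arithmetic, mirroring
# MoonshinePreTrainedModel._get_feat_extract_output_lengths:


def moonshine_encoder_frames(num_samples: int) -> int:
    """Number of encoder frames produced for `num_samples` raw audio samples."""
    out1 = int((num_samples - 127) / 64 + 1)  # conv1: kernel 127, stride 64
    out2 = int((out1 - 7) / 3 + 1)            # conv2: kernel 7, stride 3
    out3 = int((out2 - 3) / 2 + 1)            # conv3: kernel 3, stride 2
    return out3


# moonshine_encoder_frames(16000) -> 40, i.e. one second of 16 kHz audio maps
# to roughly 40 encoder positions (about 25 ms per frame).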
transformers/src/transformers/models/moonshine/modular_moonshine.py/0
{ "file_path": "transformers/src/transformers/models/moonshine/modular_moonshine.py", "repo_id": "transformers", "token_count": 18599 }
532
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MRA checkpoints from the original repository. URL: https://github.com/mlpen/mra-attention""" import argparse import torch from transformers import MraConfig, MraForMaskedLM def rename_key(orig_key): if "model" in orig_key: orig_key = orig_key.replace("model.", "") if "norm1" in orig_key: orig_key = orig_key.replace("norm1", "attention.output.LayerNorm") if "norm2" in orig_key: orig_key = orig_key.replace("norm2", "output.LayerNorm") if "norm" in orig_key: orig_key = orig_key.replace("norm", "LayerNorm") if "transformer" in orig_key: layer_num = orig_key.split(".")[0].split("_")[-1] orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}") if "mha.attn" in orig_key: orig_key = orig_key.replace("mha.attn", "attention.self") if "mha" in orig_key: orig_key = orig_key.replace("mha", "attention") if "W_q" in orig_key: orig_key = orig_key.replace("W_q", "self.query") if "W_k" in orig_key: orig_key = orig_key.replace("W_k", "self.key") if "W_v" in orig_key: orig_key = orig_key.replace("W_v", "self.value") if "ff.0" in orig_key: orig_key = orig_key.replace("ff.0", "intermediate.dense") if "ff.2" in orig_key: orig_key = orig_key.replace("ff.2", "output.dense") if "ff" in orig_key: orig_key = orig_key.replace("ff", "output.dense") if "mlm_class" in orig_key: orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder") if "mlm" in orig_key: orig_key = orig_key.replace("mlm", "cls.predictions.transform") if "backbone.backbone.encoders" in orig_key: orig_key = orig_key.replace("backbone.backbone.encoders", "encoder.layer") if "cls" not in orig_key: orig_key = "mra." + orig_key return orig_key def convert_checkpoint_helper(max_position_embeddings, orig_state_dict): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if ("pooler" in key) or ("sen_class" in key): continue else: orig_state_dict[rename_key(key)] = val orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"] orig_state_dict["mra.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2 return orig_state_dict def convert_mra_checkpoint(checkpoint_path, mra_config_file, pytorch_dump_path): orig_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model_state_dict"] config = MraConfig.from_json_file(mra_config_file) model = MraForMaskedLM(config) new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict) print(model.load_state_dict(new_state_dict)) model.eval() model.save_pretrained(pytorch_dump_path) print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to Mra pytorch checkpoint." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for Mra model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_mra_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
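# --- Illustrative sketch (not part of the original file) ---
# rename_key above is a chain of substring substitutions. The keys below are
# hypothetical (made up for illustration, not taken from a real MRA
# checkpoint); tracing them through the function gives:
#
#     for key in (
#         "model.transformer_0.mha.W_q.weight",
#         "model.transformer_2.norm1.weight",
#         "model.mlm.mlm_class.bias",
#     ):
#         print(key, "->", rename_key(key))
#
#     model.transformer_0.mha.W_q.weight -> mra.encoder.layer.0.attention.self.query.weight
#     model.transformer_2.norm1.weight   -> mra.encoder.layer.2.attention.output.LayerNorm.weight
#     model.mlm.mlm_class.bias           -> cls.predictions.decoder.bias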
transformers/src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/mra/convert_mra_pytorch_to_pytorch.py", "repo_id": "transformers", "token_count": 1721 }
533
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Musicgen Melody checkpoints from the original repository.""" import argparse from collections import OrderedDict from pathlib import Path import torch from audiocraft.models import MusicGen from transformers import ( AutoTokenizer, EncodecModel, T5EncoderModel, ) from transformers.models.musicgen_melody.configuration_musicgen_melody import MusicgenMelodyDecoderConfig from transformers.models.musicgen_melody.feature_extraction_musicgen_melody import MusicgenMelodyFeatureExtractor from transformers.models.musicgen_melody.modeling_musicgen_melody import ( MusicgenMelodyForCausalLM, MusicgenMelodyForConditionalGeneration, ) from transformers.models.musicgen_melody.processing_musicgen_melody import MusicgenMelodyProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"] EXPECTED_ADDITIONAL_KEYS = ["condition_provider.conditioners.self_wav.chroma.spec.window"] def rename_keys(name): if "emb" in name: name = name.replace("emb", "model.decoder.embed_tokens") if "transformer" in name: name = name.replace("transformer", "model.decoder") if "cross_attention" in name: name = name.replace("cross_attention", "encoder_attn") if "linear1" in name: name = name.replace("linear1", "fc1") if "linear2" in name: name = name.replace("linear2", "fc2") if "norm1" in name: name = name.replace("norm1", "self_attn_layer_norm") if "norm_cross" in name: name = name.replace("norm_cross", "encoder_attn_layer_norm") if "norm2" in name: name = name.replace("norm2", "final_layer_norm") if "out_norm" in name: name = name.replace("out_norm", "model.decoder.layer_norm") if "linears" in name: name = name.replace("linears", "lm_heads") if "condition_provider.conditioners.description.output_proj" in name: name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj") if "condition_provider.conditioners.self_wav.output_proj" in name: name = name.replace("condition_provider.conditioners.self_wav.output_proj", "audio_enc_to_dec_proj") return name def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> tuple[dict, dict]: """Function that takes the fairseq MusicgenMelody state dict and renames it according to the HF module names. 
It further partitions the state dict into the decoder (LM) state dict, and that for the text encoder projection and for the audio encoder projection.""" keys = list(state_dict.keys()) enc_dec_proj_state_dict = {} audio_enc_to_dec_proj_state_dict = {} for key in keys: val = state_dict.pop(key) key = rename_keys(key) if "in_proj_weight" in key: # split fused qkv proj state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :] state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :] state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :] elif "audio_enc_to_dec_proj" in key: audio_enc_to_dec_proj_state_dict[key[len("audio_enc_to_dec_proj.") :]] = val elif "enc_to_dec_proj" in key: enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val else: state_dict[key] = val return state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenMelodyDecoderConfig: if checkpoint == "facebook/musicgen-melody" or checkpoint == "facebook/musicgen-stereo-melody": hidden_size = 1536 num_hidden_layers = 48 num_attention_heads = 24 elif checkpoint == "facebook/musicgen-melody-large" or checkpoint == "facebook/musicgen-stereo-melody-large": hidden_size = 2048 num_hidden_layers = 48 num_attention_heads = 32 else: raise ValueError( "Checkpoint should be one of `['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, " "or `['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` " f"for the stereo checkpoints, got {checkpoint}." ) if "stereo" in checkpoint: audio_channels = 2 num_codebooks = 8 else: audio_channels = 1 num_codebooks = 4 config = MusicgenMelodyDecoderConfig( hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_codebooks=num_codebooks, audio_channels=audio_channels, ) return config @torch.no_grad() def convert_musicgen_melody_checkpoint( checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu", test_same_output=False ): fairseq_model = MusicGen.get_pretrained(checkpoint, device=args.device) decoder_config = decoder_config_from_checkpoint(checkpoint) decoder_state_dict = fairseq_model.lm.state_dict() decoder_state_dict, enc_dec_proj_state_dict, audio_enc_to_dec_proj_state_dict = rename_state_dict( decoder_state_dict, hidden_size=decoder_config.hidden_size ) text_encoder = T5EncoderModel.from_pretrained("t5-base") audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz") decoder = MusicgenMelodyForCausalLM(decoder_config).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False) for key in missing_keys.copy(): if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(key) for key in unexpected_keys.copy(): if key in EXPECTED_ADDITIONAL_KEYS: unexpected_keys.remove(key) if len(missing_keys) > 0: raise ValueError(f"Missing key(s) in state_dict: {missing_keys}") if len(unexpected_keys) > 0: raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}") # init the composite model model = MusicgenMelodyForConditionalGeneration( text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder ).to(args.device) # load the pre-trained enc-dec projection (from the decoder state dict) 
model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict) # load the pre-trained audio encoder projection (from the decoder state dict) model.audio_enc_to_dec_proj.load_state_dict(audio_enc_to_dec_proj_state_dict) # check we can do a forward pass input_ids = torch.arange(0, 2 * decoder_config.num_codebooks, dtype=torch.long).reshape(2, -1).to(device) decoder_input_ids = input_ids.reshape(2 * decoder_config.num_codebooks, -1).to(device) with torch.no_grad(): logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits output_length = 1 + input_ids.shape[1] + model.config.chroma_length if logits.shape != (2 * decoder_config.num_codebooks, output_length, 2048): raise ValueError("Incorrect shape for logits") # now construct the processor tokenizer = AutoTokenizer.from_pretrained("t5-base") feature_extractor = MusicgenMelodyFeatureExtractor() processor = MusicgenMelodyProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) # set the appropriate bos/pad token ids model.generation_config.decoder_start_token_id = 2048 model.generation_config.pad_token_id = 2048 # set other default generation config params model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate) model.generation_config.do_sample = True model.generation_config.guidance_scale = 3.0 if test_same_output: # check same output than original model decoder_input_ids = torch.ones_like(decoder_input_ids).to(device) * model.generation_config.pad_token_id with torch.no_grad(): decoder_input_ids = decoder_input_ids[: decoder_config.num_codebooks] inputs = processor(text=["gen"], return_tensors="pt", padding=True).to(device) logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits attributes, prompt_tokens = fairseq_model._prepare_tokens_and_attributes(["gen"], None) original_logits = fairseq_model.lm.forward( decoder_input_ids.reshape(1, decoder_config.num_codebooks, -1), attributes ) torch.testing.assert_close( original_logits.squeeze(2).reshape(decoder_config.num_codebooks, -1), logits[:, -1], rtol=1e-5, atol=5e-5, ) if pytorch_dump_folder is not None: Path(pytorch_dump_folder).mkdir(exist_ok=True) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}") model.save_pretrained(pytorch_dump_folder) processor.save_pretrained(pytorch_dump_folder) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}") model.push_to_hub(repo_id, create_pr=True) processor.push_to_hub(repo_id, create_pr=True) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="facebook/musicgen-melody", type=str, help="Checkpoint size of the Musicgen Melody model you'd like to convert. Can be one of: " "`['facebook/musicgen-melody', 'facebook/musicgen-melody-large']` for the mono checkpoints, or " "`['facebook/musicgen-stereo-melody', 'facebook/musicgen-stereo-melody-large']` " "for the stereo checkpoints.", ) parser.add_argument( "--pytorch_dump_folder", default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default="musicgen-melody", type=str, help="Where to upload the converted model on the 🤗 hub.", ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." 
) parser.add_argument("--test_same_output", default=False, type=bool, help="If `True`, test if same output logits.") args = parser.parse_args() convert_musicgen_melody_checkpoint( args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device, args.test_same_output )
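# --- Illustrative sketch (not part of the original file) ---
# rename_state_dict above splits each fused attention projection
# `in_proj_weight` of shape (3 * hidden_size, hidden_size) into separate
# q/k/v weights by row slices. A minimal standalone sketch of that split,
# with a toy hidden size rather than a real checkpoint (torch is already
# imported at the top of this file):


def split_fused_qkv(in_proj_weight: torch.Tensor, hidden_size: int) -> dict:
    """Split a fused (3 * hidden_size, hidden_size) weight into q/k/v rows."""
    return {
        "q_proj.weight": in_proj_weight[:hidden_size, :],
        "k_proj.weight": in_proj_weight[hidden_size : 2 * hidden_size, :],
        "v_proj.weight": in_proj_weight[-hidden_size:, :],
    }


# fused = torch.randn(3 * 4, 4)                       # toy hidden_size = 4
# parts = split_fused_qkv(fused, 4)
# all(w.shape == (4, 4) for w in parts.values())      # -> True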
transformers/src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py/0
{ "file_path": "transformers/src/transformers/models/musicgen_melody/convert_musicgen_melody_transformers.py", "repo_id": "transformers", "token_count": 4490 }
534
# coding=utf-8 # Copyright 2022 UW-Madison The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Nystromformer model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( auto_docstring, logging, ) from .configuration_nystromformer import NystromformerConfig logger = logging.get_logger(__name__) class NystromformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False ) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False, ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: 
inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class NystromformerSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.num_landmarks = config.num_landmarks self.seq_len = config.segment_means_seq_len self.conv_kernel_size = config.conv_kernel_size if config.inv_coeff_init_option: self.init_option = config["inv_init_coeff_option"] else: self.init_option = "original" self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.conv_kernel_size is not None: self.conv = nn.Conv2d( in_channels=self.num_attention_heads, out_channels=self.num_attention_heads, kernel_size=(self.conv_kernel_size, 1), padding=(self.conv_kernel_size // 2, 0), bias=False, groups=self.num_attention_heads, ) # Function to approximate Moore-Penrose inverse via the iterative method def iterative_inv(self, mat, n_iter=6): identity = torch.eye(mat.size(-1), device=mat.device) key = mat # The entries of key are positive and ||key||_{\infty} = 1 due to softmax if self.init_option == "original": # This original implementation is more conservative to compute coefficient of Z_0. value = 1 / torch.max(torch.sum(key, dim=-2)) * key.transpose(-1, -2) else: # This is the exact coefficient computation, 1 / ||key||_1, of initialization of Z_0, leading to faster convergence. 
value = 1 / torch.max(torch.sum(key, dim=-2), dim=-1).values[:, :, None, None] * key.transpose(-1, -2) for _ in range(n_iter): key_value = torch.matmul(key, value) value = torch.matmul( 0.25 * value, 13 * identity - torch.matmul(key_value, 15 * identity - torch.matmul(key_value, 7 * identity - key_value)), ) return value def forward(self, hidden_states, attention_mask=None, output_attentions=False): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = query_layer / math.sqrt(math.sqrt(self.attention_head_size)) key_layer = key_layer / math.sqrt(math.sqrt(self.attention_head_size)) if self.num_landmarks == self.seq_len: attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in NystromformerModel forward() function) attention_scores = attention_scores + attention_mask attention_probs = nn.functional.softmax(attention_scores, dim=-1) context_layer = torch.matmul(attention_probs, value_layer) else: q_landmarks = query_layer.reshape( -1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size, ).mean(dim=-2) k_landmarks = key_layer.reshape( -1, self.num_attention_heads, self.num_landmarks, self.seq_len // self.num_landmarks, self.attention_head_size, ).mean(dim=-2) kernel_1 = torch.nn.functional.softmax(torch.matmul(query_layer, k_landmarks.transpose(-1, -2)), dim=-1) kernel_2 = torch.nn.functional.softmax(torch.matmul(q_landmarks, k_landmarks.transpose(-1, -2)), dim=-1) attention_scores = torch.matmul(q_landmarks, key_layer.transpose(-1, -2)) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in NystromformerModel forward() function) attention_scores = attention_scores + attention_mask kernel_3 = nn.functional.softmax(attention_scores, dim=-1) attention_probs = torch.matmul(kernel_1, self.iterative_inv(kernel_2)) new_value_layer = torch.matmul(kernel_3, value_layer) context_layer = torch.matmul(attention_probs, new_value_layer) if self.conv_kernel_size is not None: context_layer += self.conv(value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class NystromformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NystromformerAttention(nn.Module): def __init__(self, config, position_embedding_type=None): 
super().__init__() self.self = NystromformerSelfAttention(config, position_embedding_type=position_embedding_type) self.output = NystromformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_outputs = self.self(hidden_states, attention_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Nystromformer class NystromformerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nystromformer class NystromformerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class NystromformerLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = NystromformerAttention(config) self.add_cross_attention = config.add_cross_attention self.intermediate = NystromformerIntermediate(config) self.output = NystromformerOutput(config) def forward(self, hidden_states, attention_mask=None, output_attentions=False): self_attention_outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class 
NystromformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([NystromformerLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Nystromformer class NystromformerPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Nystromformer class NystromformerLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = NystromformerPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Nystromformer class NystromformerOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = NystromformerLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores @auto_docstring class NystromformerPreTrainedModel(PreTrainedModel): config: NystromformerConfig base_model_prefix = "nystromformer" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class NystromformerModel(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = NystromformerEmbeddings(config) self.encoder = NystromformerEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @auto_docstring class NystromformerForMaskedLM(NystromformerPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder"] def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.cls = NystromformerOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class NystromformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @auto_docstring( custom_intro=""" Nyströmformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class NystromformerForSequenceClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.classifier = NystromformerClassificationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class NystromformerForMultipleChoice(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.nystromformer = NystromformerModel(config) self.pre_classifier = nn.Linear(config.hidden_size, config.hidden_size) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_state = outputs[0] # (bs * num_choices, seq_len, dim) pooled_output = hidden_state[:, 0] # (bs * num_choices, dim) pooled_output = self.pre_classifier(pooled_output) # (bs * num_choices, dim) pooled_output = nn.ReLU()(pooled_output) # (bs * num_choices, dim) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class NystromformerForTokenClassification(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. 
Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class NystromformerForQuestionAnswering(NystromformerPreTrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.nystromformer = NystromformerModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.nystromformer( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ 
"NystromformerForMaskedLM", "NystromformerForMultipleChoice", "NystromformerForQuestionAnswering", "NystromformerForSequenceClassification", "NystromformerForTokenClassification", "NystromformerLayer", "NystromformerModel", "NystromformerPreTrainedModel", ]
transformers/src/transformers/models/nystromformer/modeling_nystromformer.py/0
{ "file_path": "transformers/src/transformers/models/nystromformer/modeling_nystromformer.py", "repo_id": "transformers", "token_count": 18582 }
535
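# Illustrative sketch (editorial, not part of the file above): the `iterative_inv` method in
# modeling_nystromformer.py approximates the Moore-Penrose pseudoinverse of the landmark kernel
# with a Newton-Schulz-style iteration. This standalone snippet replays the same update rule on a
# toy row-stochastic kernel and reports how close Z @ K gets to the identity; the 8x8 size and the
# random kernel are arbitrary choices for illustration, not values used by the model.
import torch


def approximate_pinv(key: torch.Tensor, n_iter: int = 6) -> torch.Tensor:
    """Same update rule as NystromformerSelfAttention.iterative_inv with the "original" init option."""
    identity = torch.eye(key.size(-1), device=key.device)
    # conservative initialization of Z_0: key.T scaled by 1 / (max column sum)
    value = 1 / torch.max(torch.sum(key, dim=-2)) * key.transpose(-1, -2)
    for _ in range(n_iter):
        key_value = torch.matmul(key, value)
        value = torch.matmul(
            0.25 * value,
            13 * identity
            - torch.matmul(key_value, 15 * identity - torch.matmul(key_value, 7 * identity - key_value)),
        )
    return value


if __name__ == "__main__":
    torch.manual_seed(0)
    kernel = torch.softmax(torch.randn(8, 8), dim=-1)  # toy landmark-vs-landmark kernel
    pinv = approximate_pinv(kernel)
    # the residual shrinks toward zero as n_iter grows (a random softmax kernel is almost surely full rank)
    print(torch.linalg.norm(pinv @ kernel - torch.eye(8)))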
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """OmDet-Turbo model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class OmDetTurboConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`]. It is used to instantiate a OmDet-Turbo model according to the specified arguments, defining the model architecture Instantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo [omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`PretrainedConfig`, *optional*): The configuration of the text backbone. backbone_config (`PretrainedConfig`, *optional*): The configuration of the vision backbone. use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether to use the timm for the vision backbone. backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`): The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False` a randomly initialized backbone with the same architecture `backbone` is used. backbone_kwargs (`dict`, *optional*): Additional kwargs for the vision backbone. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use a pretrained vision backbone. apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`): Whether to apply layer normalization on the feature maps of the vision backbone output. image_size (`int`, *optional*, defaults to 640): The size (resolution) of each image. disable_custom_kernels (`bool`, *optional*, defaults to `False`): Whether to disable custom kernels. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon value for layer normalization. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon value for batch normalization. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. text_projection_in_dim (`int`, *optional*, defaults to 512): The input dimension for the text projection. text_projection_out_dim (`int`, *optional*, defaults to 512): The output dimension for the text projection. task_encoder_hidden_dim (`int`, *optional*, defaults to 1024): The feedforward dimension for the task encoder. class_embed_dim (`int`, *optional*, defaults to 512): The dimension of the classes embeddings. class_distance_type (`str`, *optional*, defaults to `"cosine"`): The type of of distance to compare predicted classes to projected classes embeddings. 
Can be `"cosine"` or `"dot"`. num_queries (`int`, *optional*, defaults to 900): The number of queries. csp_activation (`str`, *optional*, defaults to `"silu"`): The activation function of the Cross Stage Partial (CSP) networks of the encoder. conv_norm_activation (`str`, *optional*, defaults to `"gelu"`): The activation function of the ConvNormLayer layers of the encoder. encoder_feedforward_activation (`str`, *optional*, defaults to `"relu"`): The activation function for the feedforward network of the encoder. encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0): The dropout rate following the activation of the encoder feedforward network. encoder_dropout (`float`, *optional*, defaults to 0.0): The dropout rate of the encoder multi-head attention module. hidden_expansion (`int`, *optional*, defaults to 1): The hidden expansion of the CSP networks in the encoder. vision_features_channels (`tuple(int)`, *optional*, defaults to `[256, 256, 256]`): The projected vision features channels used as inputs for the decoder. encoder_hidden_dim (`int`, *optional*, defaults to 256): The hidden dimension of the encoder. encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`): The input channels for the encoder. encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`): The indices of the input features projected by each layers. encoder_attention_heads (`int`, *optional*, defaults to 8): The number of attention heads for the encoder. encoder_dim_feedforward (`int`, *optional*, defaults to 2048): The feedforward dimension for the encoder. encoder_layers (`int`, *optional*, defaults to 1): The number of layers in the encoder. positional_encoding_temperature (`int`, *optional*, defaults to 10000): The positional encoding temperature in the encoder. num_feature_levels (`int`, *optional*, defaults to 3): The number of feature levels for the multi-scale deformable attention module of the decoder. decoder_hidden_dim (`int`, *optional*, defaults to 256): The hidden dimension of the decoder. decoder_num_heads (`int`, *optional*, defaults to 8): The number of heads for the decoder. decoder_num_layers (`int`, *optional*, defaults to 6): The number of layers for the decoder. decoder_activation (`str`, *optional*, defaults to `"relu"`): The activation function for the decoder. decoder_dim_feedforward (`int`, *optional*, defaults to 2048): The feedforward dimension for the decoder. decoder_num_points (`int`, *optional*, defaults to 4): The number of points sampled in the decoder multi-scale deformable attention module. decoder_dropout (`float`, *optional*, defaults to 0.0): The dropout rate for the decoder. eval_size (`tuple[int, int]`, *optional*): Height and width used to computes the effective height and width of the position embeddings after taking into account the stride (see RTDetr). learn_initial_query (`bool`, *optional*, defaults to `False`): Whether to learn the initial query. cache_size (`int`, *optional*, defaults to 100): The cache size for the classes and prompts caches. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is used as an encoder-decoder model or not. kwargs (`dict[str, Any]`, *optional*): Additional parameters from the architecture. The values in kwargs will be saved as part of the configuration and can be used to control the model outputs. 
Examples: ```python >>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection >>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration >>> configuration = OmDetTurboConfig() >>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration >>> model = OmDetTurboForObjectDetection(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "omdet-turbo" attribute_map = { "encoder_hidden_dim": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, text_config=None, backbone_config=None, use_timm_backbone=True, backbone="swin_tiny_patch4_window7_224", backbone_kwargs=None, use_pretrained_backbone=False, apply_layernorm_after_vision_backbone=True, image_size=640, disable_custom_kernels=False, layer_norm_eps=1e-5, batch_norm_eps=1e-5, init_std=0.02, text_projection_in_dim=512, text_projection_out_dim=512, task_encoder_hidden_dim=1024, class_embed_dim=512, class_distance_type="cosine", num_queries=900, csp_activation="silu", conv_norm_activation="gelu", encoder_feedforward_activation="relu", encoder_feedforward_dropout=0.0, encoder_dropout=0.0, hidden_expansion=1, vision_features_channels=[256, 256, 256], encoder_hidden_dim=256, encoder_in_channels=[192, 384, 768], encoder_projection_indices=[2], encoder_attention_heads=8, encoder_dim_feedforward=2048, encoder_layers=1, positional_encoding_temperature=10000, num_feature_levels=3, decoder_hidden_dim=256, decoder_num_heads=8, decoder_num_layers=6, decoder_activation="relu", decoder_dim_feedforward=2048, decoder_num_points=4, decoder_dropout=0.0, eval_size=None, learn_initial_query=False, cache_size=100, is_encoder_decoder=True, **kwargs, ): if use_timm_backbone: if backbone_config is None: backbone_kwargs = { "out_indices": [1, 2, 3], "img_size": image_size, "always_partition": True, } elif backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `swin` vision config.") backbone_config = CONFIG_MAPPING["swin"]( window_size=7, image_size=image_size, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) if text_config is None: logger.info( "`text_config` is `None`. Initializing the config with the default `clip_text_model` text config." ) text_config = CONFIG_MAPPING["clip_text_model"]() elif isinstance(text_config, dict): text_model_type = text_config.get("model_type") text_config = CONFIG_MAPPING[text_model_type](**text_config) if class_distance_type not in ["cosine", "dot"]: raise ValueError( f"Invalid `class_distance_type`. It should be either `cosine` or `dot`, but got {class_distance_type}." 
) self.text_config = text_config self.backbone_config = backbone_config self.use_timm_backbone = use_timm_backbone self.backbone = backbone self.backbone_kwargs = backbone_kwargs self.use_pretrained_backbone = use_pretrained_backbone self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone self.image_size = image_size self.disable_custom_kernels = disable_custom_kernels self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps self.init_std = init_std self.text_projection_in_dim = text_projection_in_dim self.text_projection_out_dim = text_projection_out_dim self.task_encoder_hidden_dim = task_encoder_hidden_dim self.class_embed_dim = class_embed_dim self.class_distance_type = class_distance_type self.num_queries = num_queries self.csp_activation = csp_activation self.conv_norm_activation = conv_norm_activation self.encoder_feedforward_activation = encoder_feedforward_activation self.encoder_feedforward_dropout = encoder_feedforward_dropout self.encoder_dropout = encoder_dropout self.hidden_expansion = hidden_expansion self.vision_features_channels = vision_features_channels self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels self.encoder_projection_indices = encoder_projection_indices self.encoder_attention_heads = encoder_attention_heads self.encoder_dim_feedforward = encoder_dim_feedforward self.encoder_layers = encoder_layers self.positional_encoding_temperature = positional_encoding_temperature self.num_feature_levels = num_feature_levels self.decoder_hidden_dim = decoder_hidden_dim self.decoder_num_heads = decoder_num_heads self.decoder_num_layers = decoder_num_layers self.decoder_activation = decoder_activation self.decoder_dim_feedforward = decoder_dim_feedforward self.decoder_num_points = decoder_num_points self.decoder_dropout = decoder_dropout self.eval_size = eval_size self.learn_initial_query = learn_initial_query self.cache_size = cache_size self.is_encoder_decoder = is_encoder_decoder super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def sub_configs(self): sub_configs = {} backbone_config = getattr(self, "backbone_config", None) text_config = getattr(self, "text_config", None) if isinstance(backbone_config, PretrainedConfig): sub_configs["backbone_config"] = type(backbone_config) if isinstance(text_config, PretrainedConfig): sub_configs["text_config"] = type(text_config) return sub_configs __all__ = ["OmDetTurboConfig"]
transformers/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py/0
{ "file_path": "transformers/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py", "repo_id": "transformers", "token_count": 6027 }
536
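# Illustrative sketch (editorial, not part of the file above): configuration_omdet_turbo.py promotes
# dict sub-configs to config objects and validates `class_distance_type`. The snippet below exercises
# that path; it assumes `OmDetTurboConfig` is exported from the top-level `transformers` package
# (true for recent releases), and the hidden_size value is an arbitrary choice for illustration.
from transformers import OmDetTurboConfig

config = OmDetTurboConfig(
    text_config={"model_type": "clip_text_model", "hidden_size": 256},
    class_distance_type="cosine",  # "dot" is the only other accepted value
)
print(type(config.text_config).__name__)  # the dict is resolved via CONFIG_MAPPING["clip_text_model"]
print(config.sub_configs)  # only sub-configs that are PretrainedConfig instances are listed

# an unknown distance type raises a ValueError, per the check in __init__
try:
    OmDetTurboConfig(class_distance_type="euclidean")
except ValueError as err:
    print(err)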
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import os import re import unicodedata from typing import Optional from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer class BasicTokenizer: """ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). Args: do_lower_case (`bool`, *optional*, defaults to `True`): Whether or not to lowercase the input when tokenizing. never_split (`Iterable`, *optional*): Collection of tokens which will never be split during tokenization. Only has an effect when `do_basic_tokenize=True` tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). do_split_on_punc (`bool`, *optional*, defaults to `True`): In some instances we want to skip the basic punctuation splitting so that later tokenization can capture the full context of the words, such as contractions. """ def __init__( self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None, do_split_on_punc=True, ): if never_split is None: never_split = [] self.do_lower_case = do_lower_case self.never_split = set(never_split) self.tokenize_chinese_chars = tokenize_chinese_chars self.strip_accents = strip_accents self.do_split_on_punc = do_split_on_punc def tokenize(self, text, never_split=None): """ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. Args: never_split (`List[str]`, *optional*) Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of token not to split. """ # union() returns a new set by concatenating the two sets. never_split = self.never_split.union(set(never_split)) if never_split else self.never_split text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. 
This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). if self.tokenize_chinese_chars: text = self._tokenize_chinese_chars(text) # prevents treating the same character with different unicode codepoints as different characters unicode_normalized_text = unicodedata.normalize("NFC", text) orig_tokens = whitespace_tokenize(unicode_normalized_text) split_tokens = [] for token in orig_tokens: if token not in never_split: if self.do_lower_case: token = token.lower() if self.strip_accents is not False: token = self._run_strip_accents(token) elif self.strip_accents: token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token, never_split)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text, never_split=None): """Splits punctuation on a piece of text.""" if not self.do_split_on_punc or (never_split is not None and text in never_split): return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) or (cp >= 0x20000 and cp <= 0x2A6DF) or (cp >= 0x2A700 and cp <= 0x2B73F) or (cp >= 0x2B740 and cp <= 0x2B81F) or (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) ): return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) def get_pairs(word): """ Return set of symbol pairs in a word. 
word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def text_standardize(text): """ fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization """ text = text.replace("—", "-") text = text.replace("–", "-") text = text.replace("―", "-") text = text.replace("…", "...") text = text.replace("´", "'") text = re.sub(r"""(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)""", r" \1 ", text) text = re.sub(r"\s*\n\s*", " \n ", text) text = re.sub(r"[^\S\n]+", " ", text) return text.strip() class OpenAIGPTTokenizer(PreTrainedTokenizer): """ Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities: - lowercases all inputs, - uses `SpaCy` tokenizer and `ftfy` for pre-BPE tokenization if they are installed, fallback to BERT's `BasicTokenizer` if not. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs): try: import ftfy from spacy.lang.en import English _nlp = English() self.nlp = _nlp.tokenizer self.fix_text = ftfy.fix_text except ImportError: logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.") self.nlp = BasicTokenizer(do_lower_case=True) self.fix_text = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, **kwargs) @property def do_lower_case(self): return True @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" split_tokens = [] if self.fix_text is None: # Using BERT's BasicTokenizer text = self.nlp.tokenize(text) for token in text: 
split_tokens.extend(list(self.bpe(token).split(" "))) else: # Using SpaCy & ftfy (original tokenization process of OpenAI GPT) text = self.nlp(text_standardize(self.fix_text(text))) for token in text: split_tokens.extend(list(self.bpe(token.text.lower()).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an id in a token (BPE) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = "".join(tokens).replace("</w>", " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file __all__ = ["OpenAIGPTTokenizer"]
transformers/src/transformers/models/openai/tokenization_openai.py/0
{ "file_path": "transformers/src/transformers/models/openai/tokenization_openai.py", "repo_id": "transformers", "token_count": 6837 }
537
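# Illustrative sketch (editorial, not part of the file above): tokenization_openai.py applies greedy
# byte-pair merges with a `</w>` end-of-word marker. The toy function below replays that loop in
# slightly simplified form against a hand-made merge table; the merges and ranks are invented for
# illustration and are not taken from any real GPT vocabulary or merges file.
def toy_get_pairs(word):
    """Set of adjacent symbol pairs in a word (tuple of strings)."""
    return set(zip(word, word[1:]))


def toy_bpe(token, bpe_ranks):
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    pairs = toy_get_pairs(word)
    while pairs:
        # merge the lowest-ranked pair first, exactly like OpenAIGPTTokenizer.bpe
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        pairs = toy_get_pairs(word) if len(word) > 1 else set()
    return " ".join(word)


ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r</w>"): 2}
print(toy_bpe("lower", ranks))  # -> "low er</w>"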
# coding=utf-8 # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PerceptionLM model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING, AutoConfig from ..timm_wrapper.configuration_timm_wrapper import TimmWrapperConfig logger = logging.get_logger(__name__) class PerceptionLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PerceptionLMForConditionalGeneration`]. It is used to instantiate an PerceptionLM model according to the specified arguments, defining the model architecture. Example models: - [facebook/Perception-LM-1B](https://huggingface.co/facebook/Perception-LM-1B). - [facebook/Perception-LM-3B](https://huggingface.co/facebook/Perception-LM-3B). - [facebook/Perception-LM-8B](https://huggingface.co/facebook/Perception-LM-8B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[TimmWrapperConfig, dict]`, *optional*, defaults to `TimmWrapperConfig()`): The config object or dictionary of the vision backbone. text_config (`Union[PretrainedConfig, dict]`, *optional*, defaults to `LlamaConfig()`): The config object or dictionary of the text backbone. vision_use_cls_token (`bool`, *optional*, defaults to `True`): Whether CLS token is used in the vision backbone. If used, we remove CLS token embedding from vision output. projector_pooling_ratio (`int`, *optional*, defaults to 1): The pooling ratio used in the multimodal projector. image_token_id (`int`, *optional*, defaults to 128002): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 128003): The video token index to encode the video prompt. """ model_type = "perception_lm" sub_configs = {"text_config": AutoConfig, "vision_config": TimmWrapperConfig} def __init__( self, vision_config=None, text_config=None, vision_use_cls_token=True, projector_pooling_ratio=1, image_token_id=128002, video_token_id=128003, **kwargs, ): self.image_token_id = image_token_id self.video_token_id = video_token_id if isinstance(vision_config, dict): vision_config = TimmWrapperConfig(**vision_config) elif isinstance(vision_config, TimmWrapperConfig): vision_config = vision_config elif vision_config is None: vision_config = TimmWrapperConfig() self.vision_config = vision_config self.vision_use_cls_token = vision_use_cls_token if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "llama") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["llama"]() self.text_config = text_config self.projector_pooling_ratio = projector_pooling_ratio super().__init__(**kwargs) __all__ = ["PerceptionLMConfig"]
transformers/src/transformers/models/perception_lm/configuration_perception_lm.py/0
{ "file_path": "transformers/src/transformers/models/perception_lm/configuration_perception_lm.py", "repo_id": "transformers", "token_count": 1412 }
538
# coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for PhoBERT"""

import os
import re
from shutil import copyfile
from typing import Optional

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer. Based on Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
""" vocab_files_names = VOCAB_FILES_NAMES def __init__( self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ): self.vocab_file = vocab_file self.merges_file = merges_file self.encoder = {} self.encoder[str(bos_token)] = 0 self.encoder[str(pad_token)] = 1 self.encoder[str(eos_token)] = 2 self.encoder[str(unk_token)] = 3 self.add_from_file(vocab_file) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[:-1] merges = [tuple(merge.split()[:-1]) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, ) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A PhoBERT sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) word = tuple(list(word[:-1]) + [word[-1] + "</w>"]) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = "@@ ".join(word) word = word[:-4] self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" split_tokens = [] words = re.findall(r"\S+\n?", text) for token in words: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" out_string = " ".join(tokens).replace("@@ ", "").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) out_merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file): copyfile(self.merges_file, out_merge_file) return out_vocab_file, out_merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far) def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. 
""" if isinstance(f, str): try: with open(f, "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset") return lines = f.readlines() for lineTmp in lines: line = lineTmp.strip() idx = line.rfind(" ") if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'") word = line[:idx] self.encoder[word] = len(self.encoder) __all__ = ["PhobertTokenizer"]
transformers/src/transformers/models/phobert/tokenization_phobert.py/0
{ "file_path": "transformers/src/transformers/models/phobert/tokenization_phobert.py", "repo_id": "transformers", "token_count": 5913 }
539
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from torch import nn from transformers import PLBartConfig, PLBartForConditionalGeneration, PLBartForSequenceClassification def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(k, None) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def convert_fairseq_plbart_checkpoint_from_disk( checkpoint_path, hf_config_path="uclanlp/plbart-base", finetuned=False, classification=False ): state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"] remove_ignore_keys_(state_dict) vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0] plbart_config = PLBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] if not classification: model = PLBartForConditionalGeneration(plbart_config) model.model.load_state_dict(state_dict) if finetuned: model.lm_head = make_linear_from_emb(model.model.shared) else: classification_head = {} for key, value in state_dict.copy().items(): if key.startswith("classification_heads.sentence_classification_head"): classification_head[key.replace("classification_heads.sentence_classification_head.", "")] = value state_dict.pop(key) model = PLBartForSequenceClassification(plbart_config) model.model.load_state_dict(state_dict) model.classification_head.load_state_dict(classification_head) return model if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default="uclanlp/plbart-base", type=str, help="Which huggingface architecture to use: plbart-base", ) parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint") parser.add_argument( "--classification", action="store_true", help="whether the model is a classification checkpoint" ) args = parser.parse_args() model = convert_fairseq_plbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, classification=args.classification, ) model.save_pretrained(args.pytorch_dump_folder_path)
transformers/src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py/0
{ "file_path": "transformers/src/transformers/models/plbart/convert_plbart_original_checkpoint_to_torch.py", "repo_id": "transformers", "token_count": 1331 }
540
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Pop2Piano."""

import os
from typing import Optional, Union

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...tokenization_utils import BatchEncoding, PaddingStrategy, TruncationStrategy
from ...utils import TensorType
from ...utils.import_utils import requires


@requires(backends=("essentia", "librosa", "pretty_midi", "scipy", "torch"))
class Pop2PianoProcessor(ProcessorMixin):
    r"""
    Constructs a Pop2Piano processor which wraps a Pop2Piano Feature Extractor and Pop2Piano Tokenizer into a single
    processor.

    [`Pop2PianoProcessor`] offers all the functionalities of [`Pop2PianoFeatureExtractor`] and [`Pop2PianoTokenizer`].
    See the docstring of [`~Pop2PianoProcessor.__call__`] and [`~Pop2PianoProcessor.decode`] for more information.

    Args:
        feature_extractor (`Pop2PianoFeatureExtractor`):
            An instance of [`Pop2PianoFeatureExtractor`]. The feature extractor is a required input.
        tokenizer (`Pop2PianoTokenizer`):
            An instance of [`Pop2PianoTokenizer`]. The tokenizer is a required input.
    """

    attributes = ["feature_extractor", "tokenizer"]
    feature_extractor_class = "Pop2PianoFeatureExtractor"
    tokenizer_class = "Pop2PianoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(
        self,
        audio: Union[np.ndarray, list[float], list[np.ndarray]] = None,
        sampling_rate: Optional[Union[int, list[int]]] = None,
        steps_per_beat: int = 2,
        resample: Optional[bool] = True,
        notes: Union[list, TensorType] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        verbose: bool = True,
        **kwargs,
    ) -> Union[BatchFeature, BatchEncoding]:
        """
        This method uses [`Pop2PianoFeatureExtractor.__call__`] method to prepare log-mel-spectrograms for the model,
        and [`Pop2PianoTokenizer.__call__`] to prepare token_ids from notes.

        Please refer to the docstring of the above two methods for more information.
        """
        # Since Feature Extractor needs both audio and sampling_rate and tokenizer needs both token_ids and
        # feature_extractor_output, we must check for both.
        if (audio is None and sampling_rate is None) and (notes is None):
            raise ValueError(
                "You have to specify at least audios and sampling_rate in order to use feature extractor or "
                "notes to use the tokenizer part."
            )

        if audio is not None and sampling_rate is not None:
            inputs = self.feature_extractor(
                audio=audio,
                sampling_rate=sampling_rate,
                steps_per_beat=steps_per_beat,
                resample=resample,
                **kwargs,
            )

        if notes is not None:
            encoded_token_ids = self.tokenizer(
                notes=notes,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                verbose=verbose,
                **kwargs,
            )

        if notes is None:
            return inputs
        elif audio is None or sampling_rate is None:
            return encoded_token_ids
        else:
            inputs["token_ids"] = encoded_token_ids["token_ids"]
            return inputs

    def batch_decode(
        self,
        token_ids,
        feature_extractor_output: BatchFeature,
        return_midi: bool = True,
    ) -> BatchEncoding:
        """
        This method uses [`Pop2PianoTokenizer.batch_decode`] method to convert model generated token_ids to
        midi_notes.

        Please refer to the docstring of the above two methods for more information.
        """
        return self.tokenizer.batch_decode(
            token_ids=token_ids, feature_extractor_output=feature_extractor_output, return_midi=return_midi
        )

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        return cls(*args)


__all__ = ["Pop2PianoProcessor"]
transformers/src/transformers/models/pop2piano/processing_pop2piano.py/0
{ "file_path": "transformers/src/transformers/models/pop2piano/processing_pop2piano.py", "repo_id": "transformers", "token_count": 2153 }
541
# coding=utf-8 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen2.5Omni model (Audio, Image, Video).""" import math from dataclasses import dataclass from typing import Any, Callable, Optional, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import Parameter from transformers.models.llama.modeling_llama import rotate_half from transformers.models.qwen2_5_vl.configuration_qwen2_5_vl import Qwen2_5_VLVisionConfig from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import ( Qwen2_5_VisionTransformerPretrainedModel, Qwen2_5_VLAttention, Qwen2_5_VLMLP, Qwen2_5_VLPreTrainedModel, Qwen2_5_VLTextModel, Qwen2_5_VLVisionBlock, eager_attention_forward, ) from transformers.models.qwen2_audio.configuration_qwen2_audio import Qwen2AudioEncoderConfig from transformers.models.qwen2_audio.modeling_qwen2_audio import Qwen2AudioEncoderLayer from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLRotaryEmbedding from ...cache_utils import Cache from ...configuration_utils import PretrainedConfig, layer_type_validation from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutput, ModelOutput from ...modeling_rope_utils import rope_config_validation from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import ( TransformersKwargs, auto_docstring, check_torch_load_is_safe, logging, ) from ...utils.hub import cached_file logger = logging.get_logger(__name__) class Qwen2_5OmniVisionEncoderConfig(Qwen2_5_VLVisionConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerVision`]. It is used to instantiate a Qwen2.5-VL vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2.5-VL architecture. e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: depth (`int`, *optional*, defaults to 32): Number of layers (depth) in the model. hidden_size (`int`, *optional*, defaults to 3584): The size of the hidden layers. hidden_act (`str`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function used in the model. Supported options include `"quick_gelu"` and others as applicable. mlp_ratio (`float`, *optional*, defaults to 4): The ratio used to determine the size of the MLP (Multi-Layer Perceptron) hidden layer. num_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer. in_channels (`int`, *optional*, defaults to 3): Number of input channels. 
patch_size (`int`, *optional*, defaults to 14): The size of the patches extracted from the input. spatial_merge_size (`int`, *optional*, defaults to 2): The size used for merging spatial dimensions. temporal_patch_size (`int`, *optional*, defaults to 2): The size used for patches along the temporal dimension. Example: ```python >>> from transformers import Qwen2_5OmniVisionEncoderConfig, Qwen2_5OmniVisionEncoder >>> # Initializing a Qwen2_5OmniVisionEncoderConfig >>> configuration = Qwen2_5OmniVisionEncoderConfig() >>> # Initializing a Qwen2_5OmniVisionEncoder (with random weights) >>> model = Qwen2_5OmniVisionEncoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_vision_encoder" def __init__( self, depth=32, hidden_size=3584, hidden_act="silu", intermediate_size=3420, num_heads=16, in_channels=3, patch_size=14, spatial_merge_size=2, temporal_patch_size=2, window_size=112, out_hidden_size=3584, fullatt_block_indexes=[7, 15, 23, 31], initializer_range=0.02, **kwargs, ): super().__init__( depth, hidden_size, hidden_act, intermediate_size, num_heads, in_channels, patch_size, spatial_merge_size, temporal_patch_size, window_size, out_hidden_size, fullatt_block_indexes, initializer_range=initializer_range, **kwargs, ) del self.tokens_per_second class Qwen2_5OmniAudioEncoderConfig(Qwen2AudioEncoderConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniAudioEncoder`]. It is used to instantiate a Qwen2.5-Omni-Thinker audio encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio architecture. e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_mel_bins (`int`, *optional*, defaults to 128): Number of mel features used per input features. Should correspond to the value used in the `Qwen2_5OmniProcessor` class. encoder_layers (`int`, *optional*, defaults to 32): Number of encoder layers. encoder_attention_heads (`int`, *optional*, defaults to 20): Number of attention heads for each attention layer in the Transformer encoder. encoder_ffn_dim (`int`, *optional*, defaults to 5120): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. d_model (`int`, *optional*, defaults to 1280): Dimensionality of the layers. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
max_source_positions (`int`, *optional*, defaults to 1500): The maximum sequence length of log-mel filter-bank features that this model might ever be used with. n_window (`int`, *optional*, defaults to 100): The chunk for conv and flash attn in AudioEncoder. output_dim (`int`, *optional*, defaults to 3584): The output dimension of AudioEncoder. Example: ```python >>> from transformers import Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniAudioEncoder >>> # Initializing a Qwen2_5OmniAudioEncoderConfig >>> configuration = Qwen2_5OmniAudioEncoderConfig() >>> # Initializing a Qwen2_5OmniAudioEncoder (with random weights) >>> model = Qwen2_5OmniAudioEncoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_audio_encoder" def __init__( self, num_mel_bins=128, encoder_layers=32, encoder_attention_heads=20, encoder_ffn_dim=5120, d_model=1280, dropout=0, attention_dropout=0, activation_function="gelu", activation_dropout=0, scale_embedding=False, initializer_range=0.02, max_source_positions=1500, n_window=100, output_dim=3584, **kwargs, ): super().__init__( num_mel_bins, encoder_layers, encoder_attention_heads, encoder_ffn_dim, d_model, dropout, attention_dropout, activation_function, activation_dropout, scale_embedding, initializer_range, max_source_positions, **kwargs, ) self.n_window = n_window self.output_dim = output_dim del self.encoder_layerdrop class Qwen2_5OmniTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerForConditionalGeneration`]. It is used to instantiate an Qwen2.5-Omni-Thinker model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Thinker. e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 152064): Vocabulary size of the QwenOmni model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen2VLModel`] hidden_size (`int`, *optional*, defaults to 3584): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 18944): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 28): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 28): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. 
max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. rope_theta (`float`, *optional*, defaults to 1000000.0): The base period of the RoPE embeddings. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. sliding_window (`int`, *optional*, defaults to 32768): Sliding window attention (SWA) window size. If not specified, will default to `4096`. max_window_layers (`int`, *optional*, defaults to 28): The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any additional layer afterwards will use SWA (Sliding Window Attention). layer_types (`list`, *optional*): Attention pattern for each layer. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. 
Scaling factor applied to high frequency components of the RoPE initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import Qwen2_5OmniThinkerForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniVisionEncoderConfig >>> # Initializing a Qwen2_5OmniAudioEncoder config >>> audio_config = Qwen2_5OmniAudioEncoderConfig() >>> # Initializing a Qwen2_5OmniVisionEncoder config >>> vision_config = Qwen2_5OmniVisionEncoderConfig() >>> # Initializing a Qwen2.5OmniThinker configuration >>> configuration = Qwen2_5OmniThinkerConfig(audio_config, vision_config) >>> # Initializing a model from the Qwen-Omni style configuration >>> model = Qwen2_5OmniThinkerForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_text" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Qwen25OmniText` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=152064, hidden_size=3584, intermediate_size=18944, num_hidden_layers=28, num_attention_heads=28, num_key_value_heads=4, hidden_act="silu", max_position_embeddings=32768, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, tie_word_embeddings=False, rope_theta=1000000.0, rope_scaling=None, use_sliding_window=False, sliding_window=32768, max_window_layers=28, layer_types=None, attention_dropout=0.0, **kwargs, ): super().__init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window if self.use_sliding_window else None self.max_window_layers = max_window_layers # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_dropout = attention_dropout # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. 
if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) if self.rope_scaling is None: self.rope_scaling = {"mrope_section": [16, 24, 24], "rope_type": "default", "type": "default"} self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if self.sliding_window is not None and i >= self.max_window_layers else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types) class Qwen2_5OmniThinkerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerForConditionalGeneration`]. It is used to instantiate an Qwen2.5-Omni-Thinker model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Thinker. e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: audio_config (`dict`, *optional*): The config dictionary of the audio backbone. vision_config (`dict`, *optional*): The config dictionary of the vision backbone. text_config (`dict`, *optional*): The config dictionary of the text backbone. audio_token_index (`int`, *optional*, defaults to 151646): The audio token index to encode the audio prompt. image_token_index (`int`, *optional*, defaults to 151655): The image token index to encode the image prompt. video_token_index (`int`, *optional*, defaults to 151656): The video token index to encode the video prompt. position_id_per_seconds (`int`, *optional*, defaults to 25): The increment of position id per second. seconds_per_chunk (`int`, *optional*, defaults to 2): The duration in seconds of the chunk of audio and video data. audio_start_token_id (`int`, *optional*, defaults to 151647): The audio start token index to encode the audio prompt. audio_end_token_id (`int`, *optional*, defaults to 151648): The audio end token index to encode the audio prompt. user_token_id (`int, *optional*, defaults to 872): The user token index to encode the user token. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import Qwen2_5OmniThinkerForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniVisionEncoderConfig >>> # Initializing a Qwen2_5OmniAudioEncoder config >>> audio_config = Qwen2_5OmniAudioEncoderConfig() >>> # Initializing a Qwen2_5OmniVisionEncoder config >>> vision_config = Qwen2_5OmniVisionEncoderConfig() >>> # Initializing a Qwen2_5OmniTextConfig config >>> text_config = Qwen2_5OmniTextConfig() >>> # Initializing a Qwen2.5OmniThinker configuration >>> configuration = Qwen2_5OmniThinkerConfig(audio_config, vision_config, text_config) >>> # Initializing a model from the Qwen-Omni style configuration >>> model = Qwen2_5OmniThinkerForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_thinker" attribute_map = { "image_token_id": "image_token_index", "video_token_id": "video_token_index", "audio_token_id": "audio_token_index", } sub_configs = { "audio_config": Qwen2_5OmniAudioEncoderConfig, "vision_config": Qwen2_5OmniVisionEncoderConfig, "text_config": Qwen2_5OmniTextConfig, } def __init__( self, audio_config=None, vision_config=None, text_config=None, audio_token_index=151646, image_token_index=151655, video_token_index=151656, position_id_per_seconds=25, seconds_per_chunk=2, audio_start_token_id=151647, audio_end_token_id=151648, user_token_id=872, initializer_range=0.02, **kwargs, ): self.audio_token_index = audio_token_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.user_token_id = user_token_id self.position_id_per_seconds = position_id_per_seconds self.seconds_per_chunk = seconds_per_chunk self.audio_start_token_id = audio_start_token_id self.audio_end_token_id = audio_end_token_id self.initializer_range = initializer_range if isinstance(vision_config, dict): vision_config = Qwen2_5OmniVisionEncoderConfig(**vision_config) elif vision_config is None: vision_config = Qwen2_5OmniVisionEncoderConfig() self.vision_config = vision_config if isinstance(audio_config, dict): audio_config = Qwen2_5OmniAudioEncoderConfig(**audio_config) elif audio_config is None: audio_config = Qwen2_5OmniAudioEncoderConfig() self.audio_config = audio_config if isinstance(text_config, dict): text_config = Qwen2_5OmniTextConfig(**text_config) elif text_config is None: text_config = Qwen2_5OmniTextConfig() self.text_config = text_config super().__init__(**kwargs) class Qwen2_5OmniTalkerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniTalkerForConditionalGeneration`]. It is used to instantiate an Qwen2.5-Omni-Talker model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Thinker. e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: audio_token_index (`int`, *optional*, defaults to 151646): The audio token index to encode the audio prompt. image_token_index (`int`, *optional*, defaults to 151655): The image token index to encode the image prompt. video_token_index (`int`, *optional*, defaults to 151656): The video token index to encode the video prompt. 
vocab_size (`int`, *optional*, defaults to 8448): Vocabulary size of the QwenOmni model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen2VLModel`] tts_text_start_token_id (`int`, *optional*, defaults to 151860): The tts text start token index to encode the start of tts text. tts_text_end_token_id (`int`, *optional*, defaults to 151861): The tts text end token index to encode the end of tts text. tts_text_pad_token_id (`int`, *optional*, defaults to 151859): The tts text pad token index to encode the pad of tts text. tts_codec_start_token_id (`int`, *optional*, defaults to 8293): The tts codec start token index to encode the start of tts codec. tts_codec_end_token_id (`int`, *optional*, defaults to 8294): The tts codec end token index to encode the end of tts codec. tts_codec_pad_token_id (`int`, *optional*, defaults to 8292): The tts codec pad token index to encode the pad of tts codec. tts_codec_mask_token_id (`int`, *optional*, defaults to 8296): The tts codec mask token index to encode the mask of tts codec. vision_start_token_id (`int`, *optional*, defaults to 151652): The tts vision start token index to encode the start of vision. vision_end_token_id (`int`, *optional*, defaults to 151653): The tts vision end token index to encode the end of vision. embedding_size (`int`, *optional*, defaults to 3584): Dimension of the embedding representations. hidden_size (`int`, *optional*, defaults to 3584): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 18944): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 28): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 28): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. head_dim (`int`, *optional*, defaults to 128): The dimension of each attention head. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 1000000.0): The base period of the RoPE embeddings. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. 
sliding_window (`int`, *optional*, defaults to 32768): Sliding window attention (SWA) window size. If not specified, will default to `4096`. max_window_layers (`int`, *optional*, defaults to 28): The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any additional layer afterwards will use SWA (Sliding Window Attention). attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE position_id_per_seconds (`int`, *optional*, defaults to 25): The increment of position id per second. seconds_per_chunk (`int`, *optional*, defaults to 2): The duration in seconds of the chunk of audio and video data. audio_start_token_id (`int`, *optional*, defaults to 151647): The audio start token index to encode the audio prompt. audio_end_token_id (`int`, *optional*, defaults to 151648): The audio end token index to encode the audio prompt. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. spatial_merge_size (`int`, *optional*, defaults to 2): The size used for merging spatial dimensions. 
layer_types (`list`, *optional*): Attention pattern for each layer. Example: ```python >>> from transformers import Qwen2_5OmniTalkerForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniVisionEncoderConfig >>> # Initializing a Qwen2_5OmniAudioEncoder config >>> audio_config = Qwen2_5OmniAudioEncoderConfig() >>> # Initializing a Qwen2 config >>> text_config = Qwen2Config() >>> # Initializing a Qwen2_5Omni configuration >>> configuration = Qwen2_5OmniThinkerConfig(audio_config, text_config) >>> # Initializing a model from the qwen2-audio style configuration >>> model = Qwen2_5OmniTalkerForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen2_5_omni_talker" attribute_map = { "image_token_id": "image_token_index", "video_token_id": "video_token_index", "audio_token_id": "audio_token_index", } def __init__( self, audio_token_index=151646, image_token_index=151655, video_token_index=151656, vocab_size=8448, tts_text_start_token_id=151860, tts_text_end_token_id=151861, tts_text_pad_token_id=151859, tts_codec_start_token_id=8293, tts_codec_end_token_id=8294, tts_codec_pad_token_id=8292, tts_codec_mask_token_id=8296, vision_start_token_id=151652, vision_end_token_id=151653, embedding_size=3584, hidden_size=3584, intermediate_size=18944, num_hidden_layers=28, num_attention_heads=28, num_key_value_heads=4, hidden_act="silu", max_position_embeddings=32768, rms_norm_eps=1e-06, head_dim=128, use_cache=True, tie_word_embeddings=False, rope_theta=1000000.0, use_sliding_window=False, sliding_window=32768, max_window_layers=28, attention_dropout=0.0, rope_scaling=None, position_id_per_seconds=25, seconds_per_chunk=2, audio_start_token_id=151647, audio_end_token_id=151648, initializer_range=0.02, spatial_merge_size=2, layer_types=None, **kwargs, ): self.audio_token_index = audio_token_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.tts_text_start_token_id = tts_text_start_token_id self.tts_text_end_token_id = tts_text_end_token_id self.tts_text_pad_token_id = tts_text_pad_token_id self.tts_codec_start_token_id = tts_codec_start_token_id self.tts_codec_end_token_id = tts_codec_end_token_id self.tts_codec_pad_token_id = tts_codec_pad_token_id self.tts_codec_mask_token_id = tts_codec_mask_token_id self.vision_start_token_id = vision_start_token_id self.vision_end_token_id = vision_end_token_id self.vocab_size = vocab_size self.head_dim = head_dim self.embedding_size = embedding_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window if self.use_sliding_window else None self.max_window_layers = max_window_layers # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.rope_scaling = rope_scaling self.position_id_per_seconds = position_id_per_seconds # zf self.seconds_per_chunk = seconds_per_chunk # zf self.audio_start_token_id = audio_start_token_id # zf self.audio_end_token_id = audio_end_token_id # zf self.initializer_range = 
initializer_range self.spatial_merge_size = spatial_merge_size self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if self.sliding_window is not None and i >= self.max_window_layers else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types) super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) class Qwen2_5OmniDiTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of the Qwen2_5OmniToken2WavDiT used in the Qwen2.5-Omni-Token2Wav model. It defines the architecture of the DiT model, which is used for generating mel-spectrograms from tokens. Args: hidden_size (`int`, *optional*, defaults to 1024): The dimension of the model. num_hidden_layers (`int`, *optional*, defaults to 22): The number of transformer blocks in the DiT model. num_attention_heads (`int`, *optional*, defaults to 16): The number of attention heads in each transformer block. ff_mult (`int`, *optional*, defaults to 2): The multiplier for the feedforward layer in each transformer block. emb_dim (`int`, *optional*, defaults to 512): The dimension of the embedding layer. head_dim (`int`, *optional*, defaults to 64): The dimension of each attention head. repeats (`int`, *optional*, defaults to 2): The number of times the codec embeddings are repeated. num_embeds (`int`, *optional*, defaults to 8193): The number of unique embeddings in the codec. mel_dim (`int`, *optional*, defaults to 80): The dimension of the mel-spectrogram. dropout (`float`, *optional*, defaults to 0.1): The dropout rate for the transformer blocks. enc_emb_dim (`int`, *optional*, defaults to 192): The dimension of the pre-trained speaker embedding. enc_dim (`int`, *optional*, defaults to 128): The dimension of the encoder output. enc_channels (`list[int]`, *optional*, defaults to `[256, 256, 256, 256, 768]`): A list of output channels for each TDNN/SERes2Net layer in the encoder. enc_kernel_sizes (`list[int]`, *optional*, defaults to `[5, 3, 3, 3, 1]`): A list of kernel sizes for each layer in the encoder. enc_dilations (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 1]`): A list of dilations for each layer in the encoder. enc_attention_channels (`int`, *optional*, defaults to 64): The number of attention channels in the SqueezeExcitationBlock. enc_res2net_scale (`int`, *optional*, defaults to 2): The scale of the Res2Net block in the encoder. enc_se_channels (`int`, *optional*, defaults to 64): The number of output channels after squeeze in the SqueezeExcitationBlock. 
""" model_type = "qwen2_5_omni_dit" def __init__( self, hidden_size=1024, num_hidden_layers=22, num_attention_heads=16, ff_mult=2, emb_dim=512, head_dim=64, rope_theta=10000.0, max_position_embeddings=32768, block_size=24, look_ahead_layers=[10], look_backward_layers=[0, 20], repeats=2, num_embeds=8193, mel_dim=80, dropout=0.1, enc_emb_dim=192, enc_dim=128, enc_channels=[256, 256, 256, 256, 768], enc_kernel_sizes=[5, 3, 3, 3, 1], enc_dilations=[1, 2, 3, 4, 1], enc_attention_channels=64, enc_res2net_scale=2, enc_se_channels=64, **kwargs, ): self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.ff_mult = ff_mult self.emb_dim = emb_dim self.head_dim = head_dim self.rope_theta = rope_theta self.max_position_embeddings = max_position_embeddings self.block_size = block_size self.look_ahead_layers = look_ahead_layers self.look_backward_layers = look_backward_layers self.repeats = repeats self.num_embeds = num_embeds self.mel_dim = mel_dim self.dropout = dropout self.enc_emb_dim = enc_emb_dim self.enc_dim = enc_dim self.enc_channels = enc_channels self.enc_kernel_sizes = enc_kernel_sizes self.enc_dilations = enc_dilations self.enc_attention_channels = enc_attention_channels self.enc_res2net_scale = enc_res2net_scale self.enc_se_channels = enc_se_channels super().__init__(**kwargs) class Qwen2_5OmniBigVGANConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of the Qwen2_5OmniToken2WavBigVGAN module used in the Qwen2.5-Omni-Token2Wav model. It defines the architecture of the BigVGAN model, which is used for converting mel-spectrograms to waveforms. Args: mel_dim (`int`, *optional*, defaults to 80): The dimension of the mel-spectrogram. upsample_initial_channel (`int`, *optional*, defaults to 1536): The number of channels in the initial upsampling layer. resblock_kernel_sizes (`list[int]`, *optional*, defaults to `[3, 7, 11]`): A list of kernel sizes for each residual block. resblock_dilation_sizes (`list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A list of dilation sizes for each residual block. upsample_rates (`list[int]`, *optional*, defaults to `[5, 3, 2, 2, 2, 2]`): A list of upsampling rates for each upsampling layer. upsample_kernel_sizes (`list[int]`, *optional*, defaults to `[11, 7, 4, 4, 4, 4]`): A list of kernel sizes for each upsampling layer. """ model_type = "qwen2_5_omni_bigvgan" def __init__( self, mel_dim=80, upsample_initial_channel=1536, resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], upsample_rates=[5, 3, 2, 2, 2, 2], upsample_kernel_sizes=[11, 7, 4, 4, 4, 4], **kwargs, ): self.mel_dim = mel_dim self.upsample_initial_channel = upsample_initial_channel self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes super().__init__(**kwargs) class Qwen2_5OmniToken2WavConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen2_5OmniToken2WavModel`]. It is used to instantiate the Qwen2.5-Omni-Token2Wav model which combines a Diffusion Transformer (DiT) for mel-spectrogram generation with a BigVGAN model for waveform synthesis. The configuration contains sub-configurations for both components. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
    Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        dit_config ([`Qwen2_5OmniDiTConfig`], *optional*):
            Configuration class for the Diffusion Transformer (DiT) module responsible for generating
            mel-spectrograms.
        bigvgan_config ([`Qwen2_5OmniBigVGANConfig`], *optional*):
            Configuration class for the BigVGAN module responsible for converting mel-spectrograms to waveforms.

    Example:

    ```python
    >>> from transformers import Qwen2_5OmniToken2WavConfig, Qwen2_5OmniToken2WavModel

    >>> # Initialize the DiT sub-configuration (passed as a dict of `Qwen2_5OmniDiTConfig` arguments)
    >>> dit_config = {
    ...     "hidden_size": 1024,
    ...     "num_hidden_layers": 22,
    ...     "num_attention_heads": 16,
    ...     "ff_mult": 2,
    ... }

    >>> # Initialize the BigVGAN sub-configuration (passed as a dict of `Qwen2_5OmniBigVGANConfig` arguments)
    >>> bigvgan_config = {
    ...     "mel_dim": 80,
    ...     "upsample_rates": [5, 3, 2, 2, 2, 2],
    ... }

    >>> # Initialize main configuration
    >>> config = Qwen2_5OmniToken2WavConfig(dit_config, bigvgan_config)

    >>> # Initialize model with config
    >>> model = Qwen2_5OmniToken2WavModel(config)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "qwen2_5_omni_token2wav"
    sub_configs = {
        "dit_config": Qwen2_5OmniDiTConfig,
        "bigvgan_config": Qwen2_5OmniBigVGANConfig,
    }

    def __init__(self, dit_config=None, bigvgan_config=None, **kwargs):
        if dit_config is None:
            dit_config = {}
        if bigvgan_config is None:
            bigvgan_config = {}
        self.dit_config = Qwen2_5OmniDiTConfig(**dit_config)
        self.bigvgan_config = Qwen2_5OmniBigVGANConfig(**bigvgan_config)
        super().__init__(**kwargs)


class Qwen2_5OmniConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`Qwen2_5OmniForConditionalGeneration`]. It is
    used to instantiate a Qwen2.5Omni model according to the specified sub-model configurations, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        thinker_config (`dict`, *optional*): Configuration of the underlying thinker sub-model.
        talker_config (`dict`, *optional*): Configuration of the underlying talker sub-model.
        token2wav_config (`dict`, *optional*): Configuration of the underlying codec sub-model.
        enable_audio_output (`bool`, *optional*, defaults to `True`):
            Whether to enable audio output and load the talker and token2wav modules.

    Example:

    ```python
    >>> from transformers import (
    ...     Qwen2_5OmniThinkerConfig,
    ...     Qwen2_5OmniTalkerConfig,
    ...     Qwen2_5OmniToken2WavConfig,
    ...     Qwen2_5OmniForConditionalGeneration,
    ...     Qwen2_5OmniConfig,
    ... )

    >>> # Initializing sub-modules configurations.
    >>> thinker_config = Qwen2_5OmniThinkerConfig()
    >>> talker_config = Qwen2_5OmniTalkerConfig()
    >>> token2wav_config = Qwen2_5OmniToken2WavConfig()

    >>> # Initializing a module style configuration
    >>> configuration = Qwen2_5OmniConfig.from_sub_model_configs(
    ...     thinker_config, talker_config, token2wav_config
    ...
) >>> # Initializing a model (with random weights) >>> model = Qwen2_5OmniForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "qwen2_5_omni" sub_configs = { "thinker_config": Qwen2_5OmniThinkerConfig, "talker_config": Qwen2_5OmniTalkerConfig, "token2wav_config": Qwen2_5OmniToken2WavConfig, } def __init__( self, thinker_config=None, talker_config=None, token2wav_config=None, enable_audio_output: bool = True, **kwargs, ): if thinker_config is None: thinker_config = {} logger.info("thinker_config is None. Initializing thinker model with default values") if talker_config is None: talker_config = {} logger.info("talker_config is None. Initializing talker model with default values") if token2wav_config is None: token2wav_config = {} logger.info("token2wav_config is None. Initializing token2wav model with default values") self.thinker_config = Qwen2_5OmniThinkerConfig(**thinker_config) self.talker_config = Qwen2_5OmniTalkerConfig(**talker_config) self.token2wav_config = Qwen2_5OmniToken2WavConfig(**token2wav_config) self.enable_audio_output = enable_audio_output super().__init__(**kwargs) def get_text_config(self, *args, **kwargs): """ Returns the config that is meant to be used with text IO. On most models, it is the original config instance itself. On specific composite models, it is under a set of valid names. Args: decoder (`Optional[bool]`, *optional*, defaults to `False`): If set to `True`, then only search for decoder config names. """ # Overridden for deeply nested config like Qwen2-Omni. We don't have any omni model # except for Qwen yet. This has to be generalized if more deeply nested configs are # added. NOTE: currently method used only by vLLM return self.thinker_config.get_text_config(*args, **kwargs) class Qwen2_5OmniPreTrainedModel(Qwen2_5_VLPreTrainedModel): config: Qwen2_5OmniConfig _can_compile_fullgraph = False class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel): def _prepare_4d_causal_attention_mask_with_cache_position( self, attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, device: torch.device, min_dtype: float, cache_position: torch.Tensor, batch_size: int, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. device (`torch.device`): The device to place the 4D attention mask on. min_dtype (`float`): The minimum value representable with the dtype `dtype`. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask def get_llm_pos_ids_for_vision( self, start_idx: int, vision_idx: int, spatial_merge_size: int, t_index: list[int], grid_hs: list[int], grid_ws: list[int], ): llm_pos_ids_list = [] llm_grid_h = grid_hs[vision_idx] // spatial_merge_size llm_grid_w = grid_ws[vision_idx] // spatial_merge_size h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten() w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten() t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().long() _llm_pos_ids = torch.stack([t_index, h_index, w_index]) llm_pos_ids_list.append(_llm_pos_ids + start_idx) # + 1 ) # 12.09 by malinhan llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1) return llm_pos_ids def get_chunked_index( self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int ) -> list[tuple[int, int]]: """ Splits token index list into chunks based on token value ranges. Given a list of token indices, returns a list of (start, end) index tuples representing slices of the list where the token values fall within successive ranges of `t_ntoken_per_chunk`. For example, if `t_ntoken_per_chunk` is 1000, the function will create chunks such that: - the first chunk contains token values < 1000, - the second chunk contains values >= 1000 and < 2000, and so on. Parameters: token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of token index values. t_ntoken_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold). remove_index (`int`) An index id to subtract from `token_indices` before chunking Returns: `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive) and end (exclusive) indices of a chunk in `token_indices`. """ def _iter(): i, start_idx = 0, 0 # skip bos token current_chunk = 1 while i < len(token_indices): # skip eos token if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk: yield (start_idx, i) start_idx = i current_chunk += 1 i += 1 yield (start_idx, len(token_indices)) return list(_iter()) def get_rope_index( self, input_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, use_audio_in_video: bool = False, audio_seqlens: Optional[torch.LongTensor] = None, second_per_grids: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Calculate the 3D rope index based on image and video's temporal, height and width in LLM. Explanation: Each embedding sequence contains vision embedding and text embedding or just contains text embedding. 
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs. Examples: input_ids: [T T T T T], here T is for text. temporal position_ids: [0, 1, 2, 3, 4] height position_ids: [0, 1, 2, 3, 4] width position_ids: [0, 1, 2, 3, 4] For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part and 1D rotary position embedding for text part. Examples: Temporal (Time): 3 patches, representing different segments of the video in time. Height: 2 patches, dividing each frame vertically. Width: 2 patches, dividing each frame horizontally. We also have some important parameters: fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second. tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity. temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames. interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs. input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100] vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] text temporal position_ids: [101, 102, 103, 104, 105] text height position_ids: [101, 102, 103, 104, 105] text width position_ids: [101, 102, 103, 104, 105] Here we calculate the text start position_ids as the max vision position_ids plus 1. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. use_audio_in_video (`bool`, *optional*): If set to `True`, use the audio in video. audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*): The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. 
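        For a text-only batch the computation reduces to a padding-aware `cumsum` over the attention mask,
        broadcast to the three rope axes. A minimal sketch (illustrative only; it assumes `model` is an
        instantiated Qwen2.5-Omni thinker or talker model exposing this method):

        ```python
        >>> import torch

        >>> input_ids = torch.tensor([[1, 2, 3, 4, 5]])
        >>> attention_mask = torch.ones_like(input_ids)  # no padding
        >>> position_ids, rope_deltas = model.get_rope_index(input_ids, attention_mask=attention_mask)
        >>> position_ids.shape  # (3, batch_size, sequence_length)
        torch.Size([3, 1, 5])
        ```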
Returns: position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`) mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`) """ spatial_merge_size = self.spatial_merge_size image_token_id = self.config.image_token_id video_token_id = self.config.video_token_id audio_token_id = self.config.audio_token_id vision_start_token_id = self.config.vision_start_token_id audio_start_token_id = self.config.audio_start_token_id position_id_per_seconds = self.config.position_id_per_seconds seconds_per_chunk = self.config.seconds_per_chunk mrope_position_deltas = [] if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) position_ids = torch.ones( 3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device, ) image_idx, video_idx, audio_idx = 0, 0, 0 attention_mask = attention_mask.to(total_input_ids.device) for i, input_ids in enumerate(total_input_ids): input_ids = input_ids[attention_mask[i] == 1] image_nums, video_nums, audio_nums = 0, 0, 0 vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1) vision_tokens = input_ids[vision_start_indices + 1] audio_nums = torch.sum(input_ids == audio_start_token_id) image_nums = (vision_tokens == image_token_id).sum() video_nums = ( (vision_tokens == audio_start_token_id).sum() if use_audio_in_video else (vision_tokens == video_token_id).sum() ) input_tokens = input_ids.tolist() llm_pos_ids_list: list = [] st = 0 remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums multimodal_nums = ( image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums ) for _ in range(multimodal_nums): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 if image_token_id in input_tokens and remain_images > 0: ed_image = input_tokens.index(image_token_id, st) else: ed_image = len(input_tokens) + 1 if video_token_id in input_tokens and remain_videos > 0: ed_video = input_tokens.index(video_token_id, st) else: ed_video = len(input_tokens) + 1 if audio_token_id in input_tokens and remain_audios > 0: ed_audio = input_tokens.index(audio_token_id, st) else: ed_audio = len(input_tokens) + 1 min_ed = min(ed_image, ed_video, ed_audio) if min_ed == ed_audio: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1 llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + audio_len + eos_len audio_idx += 1 remain_audios -= 1 elif min_ed == ed_image: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if 
len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 grid_t = image_grid_thw[image_idx][0] grid_hs = image_grid_thw[:, 1] grid_ws = image_grid_thw[:, 2] t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).long() llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2) llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + image_len + eos_len image_idx += 1 remain_images -= 1 elif min_ed == ed_video and not use_audio_in_video: text_len = min_ed - st - 1 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 grid_t = video_grid_thw[video_idx][0] grid_hs = video_grid_thw[:, 1] grid_ws = video_grid_thw[:, 2] t_index = ( torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds ).long() llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2) llm_pos_ids_list.append(llm_pos_ids) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len + video_len + eos_len video_idx += 1 remain_videos -= 1 elif min_ed == ed_video and use_audio_in_video: text_len = min_ed - st - 2 if text_len != 0: st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 bos_len = 1 llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1 audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx grid_t = video_grid_thw[video_idx][0] grid_hs = video_grid_thw[:, 1] grid_ws = video_grid_thw[:, 2] t_index = ( torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds ).long() video_llm_pos_ids = self.get_llm_pos_ids_for_vision( st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws ) t_ntoken_per_chunk = int(position_id_per_seconds * seconds_per_chunk) video_chunk_indexes = self.get_chunked_index(video_llm_pos_ids[0], t_ntoken_per_chunk, st_idx) audio_chunk_indexes = self.get_chunked_index(audio_llm_pos_ids[0], t_ntoken_per_chunk, st_idx) sub_len = 0 for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))): video_chunk_index = video_chunk_indexes[j] if j < len(video_chunk_indexes) else None audio_chunk_index = 
audio_chunk_indexes[j] if j < len(audio_chunk_indexes) else None if video_chunk_index is not None: sub_len += video_chunk_index[1] - video_chunk_index[0] llm_pos_ids_list.append( video_llm_pos_ids[:, video_chunk_index[0] : video_chunk_index[1]] ) if audio_chunk_index is not None: sub_len += audio_chunk_index[1] - audio_chunk_index[0] llm_pos_ids_list.append( audio_llm_pos_ids[:, audio_chunk_index[0] : audio_chunk_index[1]] ) video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2) st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 eos_len = 1 llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx) st += text_len + bos_len * 2 + audio_len + video_len + eos_len * 2 audio_idx += 1 video_idx += 1 remain_videos -= 1 remain_audios -= 1 if st < len(input_tokens): st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0 text_len = len(input_tokens) - st llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device) mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids)) mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1) return position_ids, mrope_position_deltas else: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device) max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0] mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True) return position_ids, mrope_position_deltas ############################ # Start Thinker # ############################ @dataclass @auto_docstring( custom_intro=""" Base class for Qwen2.5OmniThinker causal language model (or autoregressive) outputs. """ ) class Qwen2_5OmniThinkerCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. 
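    A minimal access sketch (purely illustrative; it assumes `thinker` is a loaded
    [`Qwen2_5OmniThinkerForConditionalGeneration`] and `inputs` was produced by the processor):

    ```python
    >>> outputs = thinker(**inputs)
    >>> outputs.logits.shape  # (batch_size, sequence_length, vocab_size)
    >>> outputs.rope_deltas   # per-sample offset reused when decoding continues from the cache
    ```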
""" loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None class Qwen2_5OmniAudioAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: Qwen2_5OmniAudioEncoderConfig, ): super().__init__() self.embed_dim = config.d_model self.num_heads = config.encoder_attention_heads self.dropout = config.attention_dropout self.head_dim = self.embed_dim // self.num_heads self.num_key_value_groups = 1 # needed for eager attention self.config = config if (self.head_dim * self.num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) self.scaling = self.head_dim**-0.5 self.attention_dropout = 0.0 self.is_decoder = False self.is_causal = False self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) def forward( self, hidden_states: torch.Tensor, cu_seqlens: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" seq_length, _ = hidden_states.size() query_states = self.q_proj(hidden_states).reshape(seq_length, self.num_heads, -1) key_states = self.k_proj(hidden_states).reshape(seq_length, self.num_heads, -1) value_states = self.v_proj(hidden_states).reshape(seq_length, self.num_heads, -1) query_states = query_states.transpose(0, 1).unsqueeze(0) key_states = key_states.transpose(0, 1).unsqueeze(0) value_states = value_states.transpose(0, 1).unsqueeze(0) max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask=attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, cu_seq_lens_q=cu_seqlens, # pass cu seq lens for FA2 cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, max_length_k=max_seqlen, is_causal=False, **kwargs, ) attn_output = attn_output.reshape(seq_length, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output class Qwen2_5OmniAudioEncoderLayer(Qwen2AudioEncoderLayer): def __init__(self, config: Qwen2_5OmniAudioEncoderConfig): super().__init__(config) self.self_attn = Qwen2_5OmniAudioAttention(config) def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states = self.self_attn( hidden_states=hidden_states, cu_seqlens=cu_seqlens, attention_mask=attention_mask, **kwargs, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = 
self.fc2(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16: clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) return outputs class SinusoidsPositionEmbedding(nn.Module): def __init__(self, length, channels, max_timescale=10000): super().__init__() if channels % 2 != 0: raise ValueError("SinusoidsPositionEmbedding needs even channels input") log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float()) scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] self.register_buffer( "positional_embedding", torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1), persistent=False, ) def forward(self, seqlen: int): return self.positional_embedding[:seqlen, :] @auto_docstring( custom_intro=""" Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`Qwen2_5OmniAudioEncoderLayer`]. """ ) class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniAudioEncoderConfig main_input_name = "input_features" _no_split_modules = ["Qwen2_5OmniAudioEncoderLayer"] _supports_sdpa = True def __init__(self, config: Qwen2_5OmniAudioEncoderConfig): super().__init__(config) self.dropout = config.dropout embed_dim = config.d_model self.num_mel_bins = config.num_mel_bins self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.n_window = config.n_window self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1) self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1) self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim) self.audio_bos_eos_token = nn.Embedding(2, config.output_dim) self.layers = nn.ModuleList([Qwen2_5OmniAudioEncoderLayer(config) for _ in range(config.encoder_layers)]) self.ln_post = nn.LayerNorm(config.d_model) self.avg_pooler = nn.AvgPool1d(2, stride=2) self.proj = nn.Linear(config.d_model, config.output_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def get_input_embeddings(self) -> nn.Module: return self.conv1 def set_input_embeddings(self, value: nn.Module): self.conv1 = value def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor: # Flash Attention 2 doesn't need a 4D mask and relies on `cu_seqlens/max_seqlen` # NOTE: the created attention masl only approximates the ragged FA2 attention by # allowing bidirectional attention within `cu_seqlens` blocks, and not attending between # blocks. 
Though it will not be a 100% match for FA2's `varlen` path if self.config._attn_implementation == "flash_attention_2": return None seq_length = inputs_tensor.shape[0] attention_mask = torch.full( [1, 1, seq_length, seq_length], torch.finfo(inputs_tensor.dtype).min, device=inputs_tensor.device, dtype=inputs_tensor.dtype, ) for i in range(1, len(cu_seqlens)): attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0 return attention_mask @auto_docstring def forward( self, input_features, feature_lens=None, aftercnn_lens=None, **kwargs, ): r""" feature_lens (`torch.LongTensor` of shape `(batch_size,)`): mel length aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`): mel length after cnn """ chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long() chunk_lengths = torch.tensor( [self.n_window * 2] * chunk_num.sum(), dtype=torch.long, device=feature_lens.device, ) tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:] chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2) chunk_lengths = torch.where(chunk_lengths == 0, self.n_window * 2, chunk_lengths) chunk_list = input_features.split(chunk_lengths.tolist(), dim=1) padded_feature, padded_mask, padded_mask_after_cnn = self.padded_and_mask_function( chunk_list, chunk_lengths, padding_value=0, padding_side="right" ) padded_embed = nn.functional.gelu(self.conv1(padded_feature)) * padded_mask padded_embed = nn.functional.gelu(self.conv2(padded_embed)).transpose(1, 2) padded_embed = padded_embed + self.positional_embedding.positional_embedding[ : padded_embed.shape[1], : ].unsqueeze(0).to(padded_embed.dtype) hidden_states = padded_embed[padded_mask_after_cnn] cu_seqlens = torch.cat( ( torch.zeros(1, device=padded_mask_after_cnn.device, dtype=torch.int32), padded_mask_after_cnn.sum(1).cumsum(0), ) ).to(torch.int32) attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens) for encoder_layer in self.layers: layer_outputs = encoder_layer( hidden_states, cu_seqlens=cu_seqlens, attention_mask=attention_mask, **kwargs, ) hidden_states = layer_outputs[0] hidden_states_list = hidden_states.split(aftercnn_lens.tolist(), dim=0) token_audio_list = [] for each_audio_states in hidden_states_list: each_audio_states = self.avg_pooler(each_audio_states.transpose(0, 1)).transpose_(0, 1) each_audio_states = self.ln_post(each_audio_states) each_audio_states = self.proj(each_audio_states) token_audio_list.append(each_audio_states) token_audio = torch.cat(token_audio_list, dim=0) return BaseModelOutput(last_hidden_state=token_audio) def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"): """ Pads a sequence of tensors to their maximum length on indicated `padding_side`. Then prepares a mask so that pad tokens are not attended to. 
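        A small shape sketch (illustrative only; two mel chunks with 80 bins and lengths 4 and 6):

        ```python
        >>> chunks = [torch.randn(80, 4), torch.randn(80, 6)]
        >>> lengths = torch.tensor([4, 6])
        >>> padded, mask, mask_after_cnn = self.padded_and_mask_function(chunks, lengths)
        >>> padded.shape, mask.shape, mask_after_cnn.shape
        (torch.Size([2, 80, 6]), torch.Size([2, 1, 6]), torch.Size([2, 3]))
        ```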
""" max_len = tensor_len.max() dim = tensor_list[0].shape[0] padded_tensor = torch.full( size=(len(tensor_list), dim, max_len), fill_value=padding_value, dtype=self.dtype, device=tensor_list[0].device, ) batch_mask = torch.zeros( (len(tensor_len), max_len), dtype=torch.long, device=padded_tensor.device, ) for i, length in enumerate(tensor_len): batch_mask[i, :length] = 1 padded_tensor[i, :, :length] = tensor_list[i] feature_lens_after_cnn = (tensor_len - 1) // 2 + 1 max_len_after_cnn = feature_lens_after_cnn.max() batch_mask_after_cnn = torch.zeros( (len(tensor_len), max_len_after_cnn), dtype=torch.long, device=padded_tensor.device, ) for i, length in enumerate(feature_lens_after_cnn): batch_mask_after_cnn[i, :length] = 1 return ( padded_tensor, batch_mask.unsqueeze(1), batch_mask_after_cnn.bool(), ) # Ignore copy def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): """ Computes the output length of the convolutional layers and the output length of the audio encoder """ input_lengths = (input_lengths - 1) // 2 + 1 output_lengths = (input_lengths - 2) // 2 + 1 return input_lengths, output_lengths def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor: orig_dtype = tensor.dtype tensor = tensor.float() cos = freqs.cos() sin = freqs.sin() cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float() output = (tensor * cos) + (rotate_half(tensor) * sin) output = output.to(orig_dtype) return output class Qwen2_5OmniVisionAttention(nn.Module): def __init__(self, config: Qwen2_5OmniVisionEncoderConfig = None) -> None: super().__init__() self.dim = config.hidden_size self.num_heads = config.num_heads self.head_dim = self.dim // self.num_heads self.q = nn.Linear(self.dim, self.dim, bias=True) self.k = nn.Linear(self.dim, self.dim, bias=True) self.v = nn.Linear(self.dim, self.dim, bias=True) self.proj = nn.Linear(self.dim, self.dim) self.scaling = self.head_dim**-0.5 self.num_key_value_groups = 1 # needed for eager attention self.config = config self.attention_dropout = 0.0 self.is_causal = False def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: seq_length = hidden_states.shape[0] query_states = self.q(hidden_states).reshape(seq_length, self.num_heads, -1) key_states = self.k(hidden_states).reshape(seq_length, self.num_heads, -1) value_states = self.v(hidden_states).reshape(seq_length, self.num_heads, -1) query_states = apply_rotary_pos_emb_vision(query_states.unsqueeze(0), rotary_pos_emb).squeeze(0) key_states = apply_rotary_pos_emb_vision(key_states.unsqueeze(0), rotary_pos_emb).squeeze(0) query_states = query_states.transpose(0, 1).unsqueeze(0) key_states = key_states.transpose(0, 1).unsqueeze(0) value_states = value_states.transpose(0, 1).unsqueeze(0) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] if self.config._attn_implementation == "flash_attention_2": # Flash Attention 2: Use cu_seqlens for variable length attention max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max() attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, cu_seq_lens_q=cu_seqlens, cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, 
max_length_k=max_seqlen, is_causal=False, **kwargs, ) else: # Other implementations: Process each chunk separately lengths = cu_seqlens[1:] - cu_seqlens[:-1] splits = [ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states) ] attn_outputs = [ attention_interface( self, q, k, v, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, is_causal=False, **kwargs, )[0] for q, k, v in zip(*splits) ] attn_output = torch.cat(attn_outputs, dim=1) attn_output = attn_output.reshape(seq_length, -1).contiguous() attn_output = self.proj(attn_output) return attn_output class Qwen2_5OmniVisionBlock(Qwen2_5_VLVisionBlock): def __init__(self, config: Qwen2_5OmniVisionEncoderConfig) -> None: super().__init__(config, config._attn_implementation) self.attn = Qwen2_5OmniVisionAttention(config=config) def forward( self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: hidden_states = hidden_states + self.attn( self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb, **kwargs, ) hidden_states = hidden_states + self.mlp(self.norm2(hidden_states)) return hidden_states class Qwen2_5OmniVisionEncoder(Qwen2_5_VisionTransformerPretrainedModel): config: Qwen2_5OmniVisionEncoderConfig _no_split_modules = ["Qwen2_5OmniVisionBlock"] def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None: super().__init__(config, *inputs, **kwargs) self.blocks = nn.ModuleList([Qwen2_5OmniVisionBlock(config) for _ in range(config.depth)]) def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor: """ Args: hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`): The final hidden states of the model. grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`): The temporal, height and width of feature shape of each image in LLM. Returns: `torch.Tensor`: hidden_states. 
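        A rough shape sketch (illustrative; it assumes `spatial_merge_size` is 2, so each 2x2 group of
        patches is merged into a single output embedding):

        ```python
        >>> grid_thw = torch.tensor([[1, 16, 16]])  # one image with a 16x16 patch grid
        >>> seq_len = int(grid_thw.prod())          # 256 flattened patch embeddings go in
        >>> seq_len // (2 * 2)                      # 64 merged hidden states come out
        64
        ```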
""" hidden_states = self.patch_embed(hidden_states) rotary_pos_emb = self.rot_pos_emb(grid_thw) window_index, cu_window_seqlens = self.get_window_index(grid_thw) cu_window_seqlens = torch.tensor( cu_window_seqlens, device=hidden_states.device, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens) seq_len, _ = hidden_states.size() hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) hidden_states = hidden_states[window_index, :, :] hidden_states = hidden_states.reshape(seq_len, -1) rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1) rotary_pos_emb = rotary_pos_emb[window_index, :, :] rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1) cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum( dim=0, # Select dtype based on the following factors: # - FA2 requires that cu_seqlens_q must have dtype int32 # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw # See https://github.com/huggingface/transformers/pull/34852 for more information dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32, ) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) # Modification here for layer_num, blk in enumerate(self.blocks): if layer_num in self.fullatt_block_indexes: cu_seqlens_now = cu_seqlens else: cu_seqlens_now = cu_window_seqlens hidden_states = blk( hidden_states, cu_seqlens=cu_seqlens_now, rotary_pos_emb=rotary_pos_emb, **kwargs, ) hidden_states = self.merger(hidden_states) reverse_indices = torch.argsort(window_index) hidden_states = hidden_states[reverse_indices, :] return hidden_states class Qwen2_5OmniRotaryEmbedding(Qwen2VLRotaryEmbedding): def __init__(self, config: Qwen2_5OmniThinkerConfig, device=None): super().__init__(config, device) # It's same as `Qwen2_5_VLAttention`, but talker model's hidden_size isn't divisible by num_heads. # Removes the value error as a workaround. class Qwen2_5OmniAttention(Qwen2_5_VLAttention, nn.Module): def __init__(self, config: Qwen2_5OmniConfig, layer_idx: Optional[int] = None): nn.Module.__init__(self) self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will " "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads) self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.is_causal = True self.attention_dropout = config.attention_dropout self.rope_scaling = config.rope_scaling self.scaling = self.head_dim**-0.5 self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config) class Qwen2MLP(Qwen2_5_VLMLP): pass class Qwen2_5OmniThinkerTextModel(Qwen2_5_VLTextModel): config: Qwen2_5OmniTextConfig _no_split_modules = ["Qwen2_5OmniDecoderLayer"] def __init__(self, config: Qwen2_5OmniTextConfig): super().__init__(config) @auto_docstring( custom_intro=""" The Qwen2.5OmniThinker model which consists of a audio backbone and a language model. """ ) class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniThinkerConfig base_model_prefix = "thinker" _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] _no_split_modules = ["Qwen2_5OmniAudioEncoder", "Qwen2_5OmniVisionEncoder"] def __init__(self, config: Qwen2_5OmniThinkerConfig): super().__init__(config) self.audio_tower = Qwen2_5OmniAudioEncoder._from_config(config.audio_config) self.visual = Qwen2_5OmniVisionEncoder._from_config(config.vision_config) self.vocab_size = config.text_config.vocab_size self.model = Qwen2_5OmniThinkerTextModel._from_config(config.text_config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1 self.spatial_merge_size = config.vision_config.spatial_merge_size self.rope_deltas = None self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def get_video_features( self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None ): """ Encodes videos into continuous embeddings that can be forwarded to the language model. Args: pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input videos. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. """ pixel_values_videos = pixel_values_videos.type(self.visual.dtype) video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw) return video_embeds def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None): """ Encodes images into continuous embeddings that can be forwarded to the language model. 
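        A minimal usage sketch (illustrative; it assumes `model` is a loaded thinker and `inputs` was
        produced by [`Qwen2_5OmniProcessor`] with at least one image):

        ```python
        >>> image_embeds = model.get_image_features(inputs["pixel_values"], inputs["image_grid_thw"])
        >>> image_embeds.shape  # (num_image_tokens, hidden_size)
        ```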
Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. """ pixel_values = pixel_values.type(self.visual.dtype) image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw) return image_embeds def get_audio_features( self, input_features: torch.FloatTensor, feature_attention_mask: Optional[torch.LongTensor] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, ): """ Encodes audios into continuous embeddings that can be forwarded to the language model. Args: input_features (`torch.FloatTensor`): The tensors corresponding to the input audios. feature_attention_mask (`torch.LongTensor`, *optional*): Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. """ if feature_attention_mask is not None: audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0) else: audio_feature_lengths = None audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths( audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1) ) feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1) audio_outputs = self.audio_tower( input_features, feature_lens=feature_lens, aftercnn_lens=audio_feat_lengths, ) audio_features = audio_outputs.last_hidden_state if audio_features.shape[0] != sum(audio_output_lengths.tolist()): raise ValueError("length of audio_features should match audio_output_lengths") return audio_features def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor = None, video_features: torch.FloatTensor = None, ): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
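        The returned masks are already expanded to the embedding dimension, so they can be consumed directly
        by `masked_scatter`, mirroring how the forward pass uses them (sketch):

        ```python
        >>> image_mask, video_mask, audio_mask = self.get_placeholder_mask(
        ...     input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
        ... )
        >>> inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
        ```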
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_video_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_video_mask = special_video_mask.all(-1) special_audio_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device) ) ).all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_video_mask = input_ids == self.config.video_token_id special_audio_mask = input_ids == self.config.audio_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}" ) n_video_tokens = special_video_mask.sum() special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel(): raise ValueError( f"Videos features and image tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}" ) special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) return special_image_mask, special_video_mask, special_audio_mask @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, input_features: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_values_videos: Optional[torch.FloatTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, feature_attention_mask: Optional[torch.Tensor] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, use_audio_in_video: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, video_second_per_grid: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Qwen2_5OmniThinkerCausalLMOutputWithPast]: r""" image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*): Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_audio_in_video (`bool`, *optional*): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*): Number of seconds per grid for each video, used for temporal feature mapping. Example: ```python >>> from io import BytesIO >>> from urllib.request import urlopen >>> import librosa >>> from qwen_vl_utils import process_vision_info >>> from transformers import Qwen2_5OmniProcessor, Qwen2_5OmniThinkerForConditionalGeneration >>> thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B") >>> processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B") >>> conversations = [ >>> {'role': 'system', 'content': 'You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.'}, >>> {"role": "user", "content": [ >>> {"type": "image", "image_url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, >>> {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"}, >>> ]}, >>> ] >>> text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) >>> audios = [ librosa.load(BytesIO(urlopen( conversations[1]['content'][1]['audio_url'] ).read()), sr=self.processor.feature_extractor.sampling_rate) ] >>> images, videos = process_vision_info(conversations) >>> inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors="pt", padding=True) >>> # Generate >>> inputs['use_audio_in_video'] = `True` or `False` >>> generation = thinker.generate(**inputs, max_new_tokens=2048) >>> generate_ids = generation[:, inputs.input_ids.size(1):] >>> response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is None: # 1. Extract the input embeddings inputs_embeds = self.get_input_embeddings()(input_ids) # 2. 
Merge text , audios , image and video if input_features is not None: audio_features = self.get_audio_features( input_features, feature_attention_mask=feature_attention_mask, audio_feature_lengths=audio_feature_lengths, ) audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype) _, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds) inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features) if pixel_values is not None: image_embeds = self.get_image_features(pixel_values, image_grid_thw) image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype) image_mask, _, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds ) inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds) if pixel_values_videos is not None: video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw) video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype) _, video_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds ) inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds) if feature_attention_mask is not None: audio_feature_lengths = torch.sum(feature_attention_mask, dim=1) else: audio_feature_lengths = None if attention_mask is not None and position_ids is None: if ( cache_position is None or (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None ): delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1) position_ids, rope_deltas = self.get_rope_index( input_ids, image_grid_thw, video_grid_thw, attention_mask, use_audio_in_video, audio_feature_lengths, video_second_per_grid, ) rope_deltas = rope_deltas - delta0 self.rope_deltas = rope_deltas else: batch_size, seq_length = input_ids.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=input_ids.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size ) if not return_dict: output = (logits,) + outputs return (loss,) + output if loss is not None else output return Qwen2_5OmniThinkerCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, input_features=None, feature_attention_mask=None, use_audio_in_video=False, video_second_per_grid=None, **kwargs, ): model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, 
cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, input_features=input_features, feature_attention_mask=feature_attention_mask, use_audio_in_video=use_audio_in_video, video_second_per_grid=video_second_per_grid, **kwargs, ) model_inputs["position_ids"] = None if cache_position[0] != 0: model_inputs["pixel_values"] = None model_inputs["pixel_values_videos"] = None model_inputs["input_features"] = None return model_inputs ############################ # Start Talker # ############################ @dataclass @auto_docstring( custom_intro=""" Base class for Qwen2.5OmniTalker causal language model (or autoregressive) outputs. """ ) class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Hidden states from the thinker model that are used as input for the talker model. These represent the encoded response that the talker model will use to generate speech tokens. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None rope_deltas: Optional[torch.LongTensor] = None thinker_reply_part: torch.FloatTensor = None class Qwen2_5OmniTalkerModel(Qwen2_5_VLTextModel): config: Qwen2_5OmniTalkerConfig _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"] def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) self.embed_tokens = nn.Embedding(config.vocab_size, config.embedding_size, self.padding_idx) class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin): config: Qwen2_5OmniTalkerConfig base_model_prefix = "talker" def __init__(self, config: Qwen2_5OmniTalkerConfig): super().__init__(config) self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size) self.model = Qwen2_5OmniTalkerModel(config) self.codebook_size = config.vocab_size self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False) self.codec_bos_token = config.tts_codec_start_token_id self.codec_eos_token = config.tts_codec_end_token_id self.codec_pad_token = config.tts_codec_pad_token_id self.codec_mask_token = config.tts_codec_mask_token_id self.text_bos_token = config.tts_text_start_token_id self.text_eos_token = config.tts_text_end_token_id self.text_pad_token = config.tts_text_pad_token_id self.spatial_merge_size = self.config.spatial_merge_size self.rope_deltas = None self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) @auto_docstring def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, thinker_reply_part: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, input_text_ids: Optional[torch.LongTensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, use_audio_in_video: Optional[bool] = None, audio_feature_lengths: Optional[torch.LongTensor] = None, video_second_per_grid: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]: r""" thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Hidden states from the thinker model's output that represent the text reply part to be processed. rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*): The rope index difference between sequence length and multimodal rope. input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Input token IDs for text-only content, used for position calculation in multimodal contexts. image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*): The temporal, height and width of feature shape of each image in LLM. video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*): The temporal, height and width of feature shape of each video in LLM. 
use_audio_in_video (`bool`, *optional*): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*): The length of feature shape of each audio in LLM. video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*): Number of seconds per grid for each video, used for temporal feature mapping. Example: ```python >>> from io import BytesIO >>> from urllib.request import urlopen >>> import librosa >>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration >>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B") >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B") >>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:" >>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3" >>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate) >>> inputs = processor(text=prompt, audios=audio, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_length=30) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Generate the caption in English: Glass is breaking." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if attention_mask is not None and position_ids is None: if ( cache_position is None or (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None ): position_ids, rope_deltas = self.get_rope_index( input_text_ids, image_grid_thw, video_grid_thw, attention_mask, use_audio_in_video, audio_feature_lengths, video_second_per_grid, ) inputs_embeds[:, -1, :] += self.get_input_embeddings()( torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device) ) inputs_embeds[:, -2, :] += self.get_input_embeddings()( torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device) ) self.rope_deltas = rope_deltas else: batch_size, seq_length = input_ids.shape delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 position_ids = torch.arange(seq_length, device=input_ids.device) position_ids = position_ids.view(1, -1).expand(batch_size, -1) position_ids = position_ids.add(delta) position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) if inputs_embeds is None: # 1. 
Inference tokens after second token codec_embeds = self.get_input_embeddings()(input_ids) inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :] if thinker_reply_part.shape[1] > 1: thinker_reply_part = thinker_reply_part[:, 1:, :] talker_lm_input = self.thinker_to_talker_proj(inputs_embeds) if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) outputs = self.model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=talker_lm_input, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.codec_head(hidden_states) logits = logits.float() loss = None if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return Qwen2_5OmniTalkerCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas, thinker_reply_part=thinker_reply_part, ) def _get_initial_cache_position(self, seq_length, device, model_kwargs): # Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily inputs_embeds = model_kwargs.pop("inputs_embeds") model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs) model_kwargs["inputs_embeds"] = inputs_embeds return model_kwargs # prepare inputs for talker lm generation def prepare_inputs_for_generation( self, input_ids, input_text_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, thinker_reply_part=None, cache_position=None, position_ids=None, use_cache=True, pixel_values=None, pixel_values_videos=None, image_grid_thw=None, video_grid_thw=None, input_audio_features=None, audio_feature_attention_mask=None, audio_feature_lengths=None, use_audio_in_video=False, video_second_per_grid=None, **kwargs, ): model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values, attention_mask, inputs_embeds, cache_position, use_cache=use_cache, thinker_reply_part=thinker_reply_part, input_text_ids=input_text_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, use_audio_in_video=use_audio_in_video, audio_feature_lengths=audio_feature_lengths, video_second_per_grid=video_second_per_grid, **kwargs, ) model_inputs["position_ids"] = None return model_inputs def _update_model_kwargs_for_generation( self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool = False, num_new_tokens: int = 1, ) -> dict[str, Any]: model_kwargs = super()._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder, num_new_tokens ) if getattr(outputs, "thinker_reply_part", None) is not None: model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part return model_kwargs ############################ # Start Token2Wav # ############################ # Using custom RoPE, will use LlamaRotaryEmbedding next version class Qwen2_5OmniDiTRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim, base=10000): super().__init__() inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer("inv_freq", inv_freq) def forward(self, x): batch_size, seq_len = x.shape[0], x.shape[1] t = torch.arange(seq_len, device=x.device) device_type = x.device.type device_type = device_type if device_type != "mps" else "cpu" with 
torch.autocast(device_type=device_type, enabled=False): freqs = t.unsqueeze(1).float() @ self.inv_freq.unsqueeze(0).float() freqs = torch.stack((freqs, freqs), dim=-1) freqs = freqs.reshape(*freqs.shape[:-2], -1) freqs = freqs.repeat(batch_size, *([1] * freqs.dim())) cos = freqs.cos() sin = freqs.sin() return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Modified from Llama with a different rotate function, will fixed in next release def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ def rotate_half_codec(x): # x = rearrange(x, "... (d r) -> ... d r", r=2) x = x.reshape(*x.shape[:-1], -1, 2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return x.reshape(*x.shape[:-2], -1) cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half_codec(q) * sin) k_embed = (k * cos) + (rotate_half_codec(k) * sin) return q_embed, k_embed class TimeDelayNetBlock(nn.Module): def __init__( self, in_channels, out_channels, kernel_size, dilation, ): super().__init__() self.conv = nn.Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, padding="same", padding_mode="reflect", ) self.activation = nn.ReLU() def forward(self, hidden_states: torch.Tensor): return self.activation(self.conv(hidden_states)) class Res2NetBlock(torch.nn.Module): def __init__(self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1): super().__init__() in_channel = in_channels // scale hidden_channel = out_channels // scale self.blocks = nn.ModuleList( [ TimeDelayNetBlock( in_channel, hidden_channel, kernel_size=kernel_size, dilation=dilation, ) for i in range(scale - 1) ] ) self.scale = scale def forward(self, hidden_states): outputs = [] for i, hidden_part in enumerate(torch.chunk(hidden_states, self.scale, dim=1)): if i == 0: output_part = hidden_part elif i == 1: output_part = self.blocks[i - 1](hidden_part) else: output_part = self.blocks[i - 1](hidden_part + output_part) outputs.append(output_part) output = torch.cat(outputs, dim=1) return output class SqueezeExcitationBlock(nn.Module): def __init__(self, in_channels, se_channels, out_channels): super().__init__() self.conv1 = nn.Conv1d( in_channels=in_channels, out_channels=se_channels, kernel_size=1, padding="same", padding_mode="reflect", ) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv1d( in_channels=se_channels, out_channels=out_channels, 
kernel_size=1, padding="same", padding_mode="reflect", ) self.sigmoid = nn.Sigmoid() def forward(self, hidden_states): hidden_states_mean = hidden_states.mean(dim=2, keepdim=True) hidden_states_mean = self.relu(self.conv1(hidden_states_mean)) hidden_states_mean = self.sigmoid(self.conv2(hidden_states_mean)) return hidden_states * hidden_states_mean class AttentiveStatisticsPooling(nn.Module): """This class implements an attentive statistic pooling layer for each channel. It returns the concatenated mean and std of the input tensor. """ def __init__(self, channels, attention_channels=128): super().__init__() self.eps = 1e-12 self.tdnn = TimeDelayNetBlock(channels * 3, attention_channels, 1, 1) self.tanh = nn.Tanh() self.conv = nn.Conv1d( in_channels=attention_channels, out_channels=channels, kernel_size=1, padding="same", padding_mode="reflect", ) def _length_to_mask(self, length, max_len=None, dtype=None, device=None): """Creates a binary mask for each sequence. Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3 Arguments --------- length : torch.LongTensor Containing the length of each sequence in the batch. Must be 1D. max_len : int Max length for the mask, also the size of the second dimension. dtype : torch.dtype, default: None The dtype of the generated mask. device: torch.device, default: None The device to put the mask variable. Returns ------- mask : tensor The binary mask. """ if max_len is None: max_len = length.max().long().item() # using arange to generate mask mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand( len(length), max_len ) < length.unsqueeze(1) mask = torch.as_tensor(mask, dtype=dtype, device=device) return mask def _compute_statistics(self, x, m, dim=2): mean = (m * x).sum(dim) std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(self.eps)) return mean, std def forward(self, hidden_states): seq_length = hidden_states.shape[-1] lengths = torch.ones(hidden_states.shape[0], device=hidden_states.device) # Make binary mask of shape [N, 1, L] mask = self._length_to_mask( lengths * seq_length, max_len=seq_length, dtype=hidden_states.dtype, device=hidden_states.device ) mask = mask.unsqueeze(1) # Expand the temporal context of the pooling layer by allowing the # self-attention to look at global properties of the utterance. total = mask.sum(dim=2, keepdim=True) mean, std = self._compute_statistics(hidden_states, mask / total) mean = mean.unsqueeze(2).repeat(1, 1, seq_length) std = std.unsqueeze(2).repeat(1, 1, seq_length) attention = torch.cat([hidden_states, mean, std], dim=1) # Apply layers attention = self.conv(self.tanh(self.tdnn(attention))) # Filter out zero-paddings attention = attention.masked_fill(mask == 0, float("-inf")) attention = F.softmax(attention, dim=2) mean, std = self._compute_statistics(hidden_states, attention) # Append mean and std of the batch pooled_stats = torch.cat((mean, std), dim=1) pooled_stats = pooled_stats.unsqueeze(2) return pooled_stats class SqueezeExcitationRes2NetBlock(nn.Module): """An implementation of building block in ECAPA-TDNN, i.e., TDNN-Res2Net-TDNN-SqueezeExcitationBlock. 
""" def __init__( self, in_channels, out_channels, res2net_scale=8, se_channels=128, kernel_size=1, dilation=1, ): super().__init__() self.out_channels = out_channels self.tdnn1 = TimeDelayNetBlock( in_channels, out_channels, kernel_size=1, dilation=1, ) self.res2net_block = Res2NetBlock(out_channels, out_channels, res2net_scale, kernel_size, dilation) self.tdnn2 = TimeDelayNetBlock( out_channels, out_channels, kernel_size=1, dilation=1, ) self.se_block = SqueezeExcitationBlock(out_channels, se_channels, out_channels) def forward(self, hidden_state): residual = hidden_state hidden_state = self.tdnn1(hidden_state) hidden_state = self.res2net_block(hidden_state) hidden_state = self.tdnn2(hidden_state) hidden_state = self.se_block(hidden_state) return hidden_state + residual class ECAPA_TimeDelayNet(torch.nn.Module): """An implementation of the speaker embedding model in a paper. "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification" (https://huggingface.co/papers/2005.07143). """ def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() if len(config.enc_channels) != len(config.enc_kernel_sizes) or len(config.enc_channels) != len( config.enc_dilations ): raise ValueError("enc_channels, enc_kernel_sizes and enc_dilations should have same length") self.channels = config.enc_channels self.blocks = nn.ModuleList() # The initial TDNN layer self.blocks.append( TimeDelayNetBlock( config.mel_dim, config.enc_channels[0], config.enc_kernel_sizes[0], config.enc_dilations[0], ) ) # SE-Res2Net layers for i in range(1, len(config.enc_channels) - 1): self.blocks.append( SqueezeExcitationRes2NetBlock( config.enc_channels[i - 1], config.enc_channels[i], res2net_scale=config.enc_res2net_scale, se_channels=config.enc_se_channels, kernel_size=config.enc_kernel_sizes[i], dilation=config.enc_dilations[i], ) ) # Multi-layer feature aggregation self.mfa = TimeDelayNetBlock( config.enc_channels[-1], config.enc_channels[-1], config.enc_kernel_sizes[-1], config.enc_dilations[-1], ) # Attentive Statistical Pooling self.asp = AttentiveStatisticsPooling( config.enc_channels[-1], attention_channels=config.enc_attention_channels, ) # Final linear transformation self.fc = nn.Conv1d( in_channels=config.enc_channels[-1] * 2, out_channels=config.enc_dim, kernel_size=1, padding="same", padding_mode="reflect", ) def forward(self, hidden_states): # Minimize transpose for efficiency hidden_states = hidden_states.transpose(1, 2) hidden_states_list = [] for layer in self.blocks: hidden_states = layer(hidden_states) hidden_states_list.append(hidden_states) # Multi-layer feature aggregation hidden_states = torch.cat(hidden_states_list[1:], dim=1) hidden_states = self.mfa(hidden_states) # Attentive Statistical Pooling hidden_states = self.asp(hidden_states) # Final linear transformation hidden_states = self.fc(hidden_states) hidden_states = hidden_states.squeeze(-1) return hidden_states class DiTInputEmbedding(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() self.proj = nn.Linear( config.mel_dim + config.enc_dim + config.enc_emb_dim + config.emb_dim, config.hidden_size, ) self.spk_encoder = ECAPA_TimeDelayNet(config) def forward( self, hidden_states: torch.Tensor, speaker_embedding: torch.Tensor, condition_vector: torch.Tensor, code_embed: torch.Tensor, drop_audio_cond: Optional[bool] = False, code_embed_uncond: Optional[bool] = None, apply_cfg: Optional[bool] = True, ): if apply_cfg: hidden_states = torch.cat([hidden_states, hidden_states], 
dim=0) speaker_embedding = torch.cat([speaker_embedding, torch.zeros_like(speaker_embedding)], dim=0) condition_vector = torch.cat([condition_vector, torch.zeros_like(condition_vector)], dim=0) code_embed = torch.cat([code_embed, code_embed_uncond], dim=0) elif drop_audio_cond: # cfg for cond audio condition_vector = torch.zeros_like(condition_vector) speaker_embedding = torch.zeros_like(speaker_embedding) condition_vector = self.spk_encoder(condition_vector).unsqueeze(1).repeat(1, hidden_states.size(1), 1) hidden_states = self.proj(torch.cat((hidden_states, condition_vector, code_embed, speaker_embedding), dim=-1)) return hidden_states # Transformer backbone using DiT blocks class DiTCodecEmbedding(nn.Module): def __init__(self, codec_num_embeds, codec_dim, repeats): super().__init__() self.repeats = repeats self.codec_embed = nn.Embedding(codec_num_embeds + 1, codec_dim) def forward(self, code, drop_code=False): if drop_code: code = torch.zeros_like(code) code_embed = self.codec_embed(code) code_embed = torch.repeat_interleave(code_embed, repeats=self.repeats, dim=1) return code_embed # AdaLayerNormZero # return with modulated x for attn input, and params for later mlp modulation class Qwen2_5_OmniAdaLayerNormZero(nn.Module): def __init__(self, dim): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(dim, dim * 6) self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) def forward(self, hidden_states, emb=None): emb = self.linear(self.silu(emb)) shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None] return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp # AdaLayerNormZero for final layer # return only with modulated x for attn input, cuz no more mlp modulation class Qwen2_5_OmniAdaLayerNormZero_Final(nn.Module): def __init__(self, dim): super().__init__() self.silu = nn.SiLU() self.linear = nn.Linear(dim, dim * 2) self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6) def forward(self, hidden_states, emb): emb = self.linear(self.silu(emb)) scale, shift = torch.chunk(emb, 2, dim=1) hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :] return hidden_states # FeedForward class DiTMLP(nn.Module): def __init__(self, dim, mult=4, dropout=0.0): super().__init__() inner_dim = int(dim * mult) self.ff = nn.ModuleList( [ nn.Linear(dim, inner_dim), nn.GELU(approximate="tanh"), nn.Dropout(dropout), nn.Linear(inner_dim, dim), ] ) def forward(self, hidden_states): for layer in self.ff: hidden_states = layer(hidden_states) return hidden_states class DiTAttention(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__() self.config = config self.dim = config.hidden_size self.heads = config.num_attention_heads self.inner_dim = config.head_dim * config.num_attention_heads self.dropout = config.dropout self.is_causal = False self.to_q = nn.Linear(config.hidden_size, self.inner_dim) self.to_k = nn.Linear(config.hidden_size, self.inner_dim) self.to_v = nn.Linear(config.hidden_size, self.inner_dim) self.to_out = nn.ModuleList([nn.Linear(self.inner_dim, config.hidden_size), nn.Dropout(config.dropout)]) def forward( self, hidden_states, # noised input x position_embeddings=None, # rotary position embedding for x attention_mask=None, ) -> torch.Tensor: batch_size = hidden_states.shape[0] # `sample` projections. 
query = self.to_q(hidden_states) key = self.to_k(hidden_states) value = self.to_v(hidden_states) # attention inner_dim = key.shape[-1] head_dim = inner_dim // self.heads query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2) # apply rotary position embedding # Due to training process, only first head is applied with RoPE, will be fixed at next release cos, sin = position_embeddings query[:, :1], key[:, :1] = apply_rotary_pos_emb(query[:, :1], key[:, :1], cos, sin) attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attention_weights, _ = attention_interface( self, query, key, value, attention_mask=attention_mask, is_causal=False, ) # mask. e.g. inference got a batch with different target durations, mask out the padding attention_weights = attention_weights.reshape(batch_size, -1, self.heads * head_dim) attention_weights = attention_weights.to(query.dtype) # linear proj attention_output = self.to_out[0](attention_weights) attention_output = self.to_out[1](attention_output) return attention_output # time step conditioning embedding class SinusPositionEmbedding(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, hidden_states, scale=1000): device = hidden_states.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb) emb = scale * hidden_states.unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb.type_as(hidden_states) class DiTTimestepEmbedding(nn.Module): def __init__(self, dim, freq_embed_dim=256): super().__init__() self.time_embed = SinusPositionEmbedding(freq_embed_dim) self.time_mlp = nn.ModuleList([nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)]) def forward(self, timestep): # noqa: F821 time_hidden = self.time_embed(timestep) time_hidden = time_hidden.to(timestep.dtype) for layer in self.time_mlp: time_hidden = layer(time_hidden) # b d return time_hidden class DiTDecoderLayer(nn.Module): def __init__(self, config: Qwen2_5OmniDiTConfig, look_ahead_block=0, look_backward_block=0): super().__init__() self.attn_norm = Qwen2_5_OmniAdaLayerNormZero(config.hidden_size) self.attn = DiTAttention(config) self.look_ahead_block = look_ahead_block self.look_backward_block = look_backward_block self.ff_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, eps=1e-6) self.ff = DiTMLP(dim=config.hidden_size, mult=config.ff_mult, dropout=config.dropout) def forward( self, hidden_states, timestep, position_embeddings=None, block_diff=None ): # x: noised input, t: time embedding # pre-norm & modulation for attention input norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(hidden_states, emb=timestep) # attention attn_output = self.attn( hidden_states=norm, position_embeddings=position_embeddings, attention_mask=(block_diff >= -float(self.look_backward_block)) & (block_diff <= float(self.look_ahead_block)), ) # process attention output for input x hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_output norm = self.ff_norm(hidden_states) * (1 + scale_mlp[:, None]) + shift_mlp[:, None] ff_output = self.ff(norm) hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output return hidden_states class SnakeBeta(nn.Module): """ A modified Snake function which uses separate parameters for the magnitude of the 
periodic components Shape: - Input: (B, C, T) - Output: (B, C, T), same shape as the input Parameters: - alpha - trainable parameter that controls frequency - beta - trainable parameter that controls magnitude References: - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: https://huggingface.co/papers/2006.08195 """ def __init__(self, in_features, alpha=1.0): super().__init__() self.in_features = in_features # initialize alpha self.alpha = Parameter(torch.zeros(in_features) * alpha) self.beta = Parameter(torch.zeros(in_features) * alpha) self.no_div_by_zero = 0.000000001 def forward(self, hidden_states): """ Forward pass of the function. Applies the function to the input elementwise. SnakeBeta ∶= x + 1/b * sin^2 (xa) """ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] beta = self.beta.unsqueeze(0).unsqueeze(-1) alpha = torch.exp(alpha) beta = torch.exp(beta) hidden_states = hidden_states + (1.0 / (beta + self.no_div_by_zero)) * torch.pow( torch.sin(hidden_states * alpha), 2 ) return hidden_states def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): """Generates a 1D Kaiser-windowed sinc filter. Args: cutoff (float): Normalized cutoff frequency (0 to 0.5). half_width (float): Transition bandwidth. kernel_size (int): Number of filter taps. Returns: torch.Tensor: A tensor of shape (1, 1, kernel_size) representing the filter. """ is_even = kernel_size % 2 == 0 half_size = kernel_size // 2 # Compute Kaiser window parameters delta_f = 4 * half_width attenuation = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 if attenuation > 50.0: beta = 0.1102 * (attenuation - 8.7) elif attenuation >= 21.0: beta = 0.5842 * (attenuation - 21) ** 0.4 + 0.07886 * (attenuation - 21.0) else: beta = 0.0 kaiser_window = torch.kaiser_window(kernel_size, beta=beta, periodic=False, dtype=torch.float32) # Compute time indices if is_even: time_indices = torch.arange(-half_size, half_size) + 0.5 else: time_indices = torch.arange(kernel_size) - half_size # Compute sinc filter if cutoff == 0: return torch.zeros((1, 1, kernel_size), dtype=torch.float32) # Ensures correct shape sinc_filter = torch.sinc(2 * cutoff * time_indices) normalized_filter = 2 * cutoff * kaiser_window * sinc_filter # Normalize to ensure sum = 1 (avoid leakage of constant component) normalized_filter /= normalized_filter.sum() return normalized_filter.view(1, 1, kernel_size) class UpSample1d(nn.Module): def __init__(self, ratio=2, kernel_size=None): super().__init__() self.ratio = ratio self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size self.stride = ratio self.pad = self.kernel_size // ratio - 1 self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size) self.register_buffer("filter", filter, persistent=False) def forward(self, hidden_states): channels = hidden_states.shape[1] hidden_states = F.pad(hidden_states, (self.pad, self.pad), mode="replicate") hidden_states = self.ratio * F.conv_transpose1d( hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels ) hidden_states = hidden_states[..., self.pad_left : -self.pad_right] return hidden_states class DownSample1d(nn.Module): def __init__(self, ratio=2, kernel_size=None): super().__init__() cutoff = 0.5 / ratio half_width = 0.6 / ratio if 
cutoff < 0.0: raise ValueError("Minimum cutoff must be larger than zero.") if cutoff > 0.5: raise ValueError("A cutoff above 0.5 does not make sense.") self.even = kernel_size % 2 == 0 self.pad_left = kernel_size // 2 - int(self.even) self.pad_right = kernel_size // 2 self.stride = ratio filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) self.register_buffer("filter", filter, persistent=False) def forward(self, hidden_states): channels = hidden_states.shape[1] hidden_states = F.pad(hidden_states, (self.pad_left, self.pad_right), mode="replicate") out = F.conv1d(hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels) return out class TorchActivation1d(nn.Module): def __init__( self, activation, up_ratio: int = 2, down_ratio: int = 2, up_kernel_size: int = 12, down_kernel_size: int = 12, ): super().__init__() if not callable(activation): raise TypeError("Activation function must be callable") self.act = activation self.upsample = UpSample1d(up_ratio, up_kernel_size) self.downsample = DownSample1d(down_ratio, down_kernel_size) def forward(self, hidden_states): hidden_states = self.upsample(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.downsample(hidden_states) return hidden_states class AMPBlock(torch.nn.Module): def __init__( self, channels, kernel_size=3, dilation=(1, 3, 5), ): super().__init__() self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=self._get_padding(kernel_size, dilation[0]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=self._get_padding(kernel_size, dilation[1]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=self._get_padding(kernel_size, dilation[2]), ), ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), ] ) self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers self.activations = nn.ModuleList( [TorchActivation1d(activation=SnakeBeta(channels)) for _ in range(self.num_layers)] ) def _get_padding(self, kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) def forward(self, hidden_states): acts1, acts2 = self.activations[::2], self.activations[1::2] for conv1, conv2, act1, act2 in zip(self.convs1, self.convs2, acts1, acts2): residual = hidden_states hidden_states = act1(hidden_states) hidden_states = conv1(hidden_states) hidden_states = act2(hidden_states) hidden_states = conv2(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2WavBigVGAN model. Which take mel spectrogram as input and predict waveform. 
""" ) class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniBigVGANConfig def __init__(self, config: Qwen2_5OmniBigVGANConfig): super().__init__(config) self.num_residual_blocks = len(config.resblock_kernel_sizes) self.num_upsample_layers = len(config.upsample_rates) self.conv_pre = nn.Conv1d(config.mel_dim, config.upsample_initial_channel, 7, 1, padding=3) # Removing extra ModuleList breaks official state dict ups = [ nn.ModuleList( [ nn.ConvTranspose1d( config.upsample_initial_channel // (2**layer_idx), config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, stride, padding=(kernel_size - stride) // 2, ) ] ) for layer_idx, (stride, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)) ] self.ups = nn.ModuleList(ups) self.resblocks = nn.ModuleList( [ AMPBlock(config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, dilation) for layer_idx in range(self.num_upsample_layers) for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes) ] ) self.activation_post = TorchActivation1d( activation=SnakeBeta(config.upsample_initial_channel // (2**self.num_upsample_layers)) ) self.conv_post = nn.Conv1d( config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False ) def normalize_spectrogram(self, spectrogram, max_value, min_db): return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value) def amplitude_to_db(self, amplitude, min_db_level): min_level = torch.exp( torch.tensor(min_db_level / 20.0 * np.log(10), device=amplitude.device, dtype=amplitude.dtype) ) return 20 * torch.log10(torch.clamp(amplitude, min=min_level)) def process_mel_spectrogram(self, mel_spectrogram): amplitude_spectrum = torch.exp(mel_spectrogram) decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20 return self.normalize_spectrogram(decibel_spectrum, 1, -115) def forward(self, mel_spectrogram): processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram) hidden_representation = self.conv_pre(processed_spectrogram) for layer_index in range(self.num_upsample_layers): hidden_representation = self.ups[layer_index][0](hidden_representation) residual_output = sum( self.resblocks[layer_index * self.num_residual_blocks + block_index](hidden_representation) for block_index in range(self.num_residual_blocks) ) residual_output = residual_output / self.num_residual_blocks hidden_representation = residual_output hidden_representation = self.activation_post(hidden_representation) output_waveform = self.conv_post(hidden_representation) return torch.clamp(output_waveform, min=-1.0, max=1.0).squeeze().cpu() class RungeKutta4ODESolver: def __init__(self, function, initial_value): self.function = function self.initial_value = initial_value self._one_third = 1 / 3 self._two_thirds = 2 / 3 def _rk4_step(self, function, time_start, time_step, time_end, value_start, function_value_start=None): k1 = function_value_start if function_value_start is not None else function(time_start, value_start) k2 = function(time_start + time_step * self._one_third, value_start + time_step * k1 * self._one_third) k3 = function(time_start + time_step * self._two_thirds, value_start + time_step * (k2 - k1 * self._one_third)) k4 = function(time_end, value_start + time_step * (k1 - k2 + k3)) return (k1 + 3 * (k2 + k3) + k4) * time_step / 8 def _compute_step(self, function, time_start, time_step, time_end, value_start): function_value_start 
= function(time_start, value_start) return self._rk4_step( function, time_start, time_step, time_end, value_start, function_value_start=function_value_start ), function_value_start def _linear_interpolation(self, time_start, time_end, value_start, value_end, time_point): if time_point == time_start: return value_start if time_point == time_end: return value_end weight = (time_point - time_start) / (time_end - time_start) return value_start + weight * (value_end - value_start) def integrate(self, time_points): solution = torch.empty( len(time_points), *self.initial_value.shape, dtype=self.initial_value.dtype, device=self.initial_value.device, ) solution[0] = self.initial_value current_index = 1 current_value = self.initial_value for time_start, time_end in zip(time_points[:-1], time_points[1:]): time_step = time_end - time_start delta_value, _ = self._compute_step(self.function, time_start, time_step, time_end, current_value) next_value = current_value + delta_value while current_index < len(time_points) and time_end >= time_points[current_index]: solution[current_index] = self._linear_interpolation( time_start, time_end, current_value, next_value, time_points[current_index] ) current_index += 1 current_value = next_value return solution @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2WavDiT model. Which take speech tokens as input and predict mel spectrogram. """ ) class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniDiTConfig _no_split_modules = ["DiTDecoderLayer"] def __init__(self, config: Qwen2_5OmniDiTConfig): super().__init__(config) self.mel_dim = config.mel_dim self.repeats = config.repeats self.time_embed = DiTTimestepEmbedding(config.hidden_size) self.text_embed = DiTCodecEmbedding(config.num_embeds, config.emb_dim, config.repeats) self.input_embed = DiTInputEmbedding(config) self.rotary_embed = Qwen2_5OmniDiTRotaryEmbedding(config.head_dim) self.hidden_size = config.hidden_size self.layers = config.num_hidden_layers self.block_size = config.block_size self.num_attention_heads = config.num_attention_heads self.transformer_blocks = nn.ModuleList() for i in range(config.num_hidden_layers): self.transformer_blocks.append( DiTDecoderLayer( config, look_ahead_block=1 if i in config.look_ahead_layers else 0, look_backward_block=1 if i in config.look_backward_layers else 0, ) ) self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size) # final modulation self.proj_out = nn.Linear(config.hidden_size, config.mel_dim) def _create_block_diff(self, hidden_states): batch, seq_len = hidden_states.shape[0], hidden_states.shape[1] block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size # [seq_length] block_i = block_indices.unsqueeze(1) # [seq_length, 1] block_j = block_indices.unsqueeze(0) # [1, seq_length] block_diff = block_j - block_i # (n, n) return block_diff.expand(batch, self.num_attention_heads, seq_len, seq_len) def forward( self, hidden_states, condition_vector, speaker_embedding, quantized_code, time_step, drop_audio_conditioning=False, drop_code=False, apply_cfg=True, ): batch_size = hidden_states.shape[0] if time_step.ndim == 0: time_step = time_step.repeat(batch_size) # Compute embeddings time_embedding = self.time_embed(time_step) text_embedding = self.text_embed(quantized_code, drop_code=False if apply_cfg else drop_code) text_embedding_unconditioned = self.text_embed(quantized_code, drop_code=True) if apply_cfg else None hidden_states = self.input_embed( hidden_states, speaker_embedding, 
condition_vector, text_embedding, drop_audio_cond=drop_audio_conditioning, code_embed_uncond=text_embedding_unconditioned, apply_cfg=apply_cfg, ) # Compute positional encodings position_embeddings = self.rotary_embed(hidden_states) blockwise_difference = self._create_block_diff(hidden_states) # Transformer blocks for transformer_block in self.transformer_blocks: hidden_states = transformer_block( hidden_states, time_embedding, position_embeddings=position_embeddings, block_diff=blockwise_difference, ) hidden_states = self.norm_out(hidden_states, time_embedding) output = self.proj_out(hidden_states) return output @torch.no_grad() def sample( self, conditioning_vector, reference_mel_spectrogram, quantized_code, num_steps=10, guidance_scale=0.5, sway_coefficient=-1.0, ): noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype) maximum_duration = quantized_code.shape[1] * self.repeats initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device) batch_size = reference_mel_spectrogram.shape[0] conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1) if batch_size != 1: raise ValueError("Only batch size = 1 is currently supported") def ode_function(time_step, hidden_states): if guidance_scale < 1e-5: prediction = self( hidden_states=hidden_states, speaker_embedding=conditioning_vector, condition_vector=reference_mel_spectrogram, quantized_code=quantized_code, time_step=time_step, drop_audio_conditioning=False, drop_code=False, ) return prediction model_output = self( hidden_states=hidden_states, quantized_code=quantized_code, speaker_embedding=conditioning_vector, condition_vector=reference_mel_spectrogram, time_step=time_step, apply_cfg=True, ) guided_prediction, null_prediction = torch.chunk(model_output, 2, dim=0) return guided_prediction + (guided_prediction - null_prediction) * guidance_scale initial_time = 0 time_embedding = torch.linspace( initial_time, 1, num_steps, device=quantized_code.device, dtype=conditioning_vector.dtype ) if sway_coefficient is not None: time_embedding += sway_coefficient * (torch.cos(torch.pi / 2 * time_embedding) - 1 + time_embedding) ode_solver = RungeKutta4ODESolver(function=ode_function, initial_value=initial_state) solution_trajectory = ode_solver.integrate(time_embedding) generated_waveform = solution_trajectory[-1] generated_mel_spectrogram = generated_waveform.permute(0, 2, 1) return generated_mel_spectrogram @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2Wav model. Consists a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform. """ ) class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel): config: Qwen2_5OmniToken2WavConfig base_model_prefix = "model" _no_split_modules = ["Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel"] def __init__(self, config: Qwen2_5OmniToken2WavConfig): super().__init__(config) attn_impl = config._attn_implementation if config._attn_implementation == "flash_attention_2": logger.warning_once( "Qwen2_5OmniToken2WavModel must inference with fp32, but flash_attention_2 only supports fp16 and bf16, " "attention implementation of Qwen2_5OmniToken2WavModel will fallback to sdpa." 
) attn_impl = "sdpa" elif config._attn_implementation == "eager": logger.warning_once( "Qwen2_5OmniToken2WavModel does not support eager attention implementation, fall back to sdpa" ) attn_impl = "sdpa" self.code2wav_dit_model = Qwen2_5OmniToken2WavDiTModel._from_config( config.dit_config, attn_implementation=attn_impl ) self.code2wav_bigvgan_model = Qwen2_5OmniToken2WavBigVGANModel._from_config( config.bigvgan_config, attn_implementation=attn_impl ) def forward( self, code, conditioning, reference_mel, num_steps=10, guidance_scale=0.5, sway_coefficient=-1.0, **kwargs, ): """Generates a waveform from input code and conditioning parameters.""" mel_spectrogram = self.code2wav_dit_model.sample( conditioning, reference_mel, code, num_steps=num_steps, guidance_scale=guidance_scale, sway_coefficient=sway_coefficient, ) waveform = self.code2wav_bigvgan_model(mel_spectrogram) return waveform ############################ # Start Qwen2.5Omni # ############################ @auto_docstring( custom_intro=""" The full Qwen2.5Omni model, a multimodal model composed of 3 sub-models: - [`Qwen2_5OmniThinkerForConditionalGeneration`]: a causal auto-regressive transformer takes text, audio, image, video as input and predict text tokens. - [`Qwen2_5OmniTalkerForConditionalGeneration`]: a causal auto-regressive transformer takes thinker hidden states and response as input and predict speech tokens. - [`Qwen2_5OmniToken2WavModel`]: a DiT model take speech tokens as input and predict mel spectrogram and a BigVGAN vocoder take mel spectrogram as input and predict waveform. """ ) class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin): config: Qwen2_5OmniConfig _no_split_modules = [ "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavModel", ] def __init__(self, config): super().__init__(config) self.thinker = Qwen2_5OmniThinkerForConditionalGeneration(config.thinker_config) self.has_talker = config.enable_audio_output self.speaker_map = {} if config.enable_audio_output: self.enable_talker() self.post_init() def enable_talker(self): self.talker = Qwen2_5OmniTalkerForConditionalGeneration(self.config.talker_config) self.token2wav = Qwen2_5OmniToken2WavModel(self.config.token2wav_config) self.token2wav.float() self.has_talker = True def load_speakers(self, path): check_torch_load_is_safe() for key, value in torch.load(path, weights_only=True).items(): self.speaker_map[key] = value logger.info(f"Speaker {list(self.speaker_map.keys())} loaded") def disable_talker(self): if hasattr(self, "talker"): del self.talker if hasattr(self, "token2wav"): del self.token2wav self.has_talker = False @classmethod def from_pretrained( cls, pretrained_model_name_or_path, *model_args, config=None, cache_dir=None, ignore_mismatched_sizes=False, force_download=False, local_files_only=False, token=None, revision="main", use_safetensors=None, weights_only=True, **kwargs, ): model = super().from_pretrained( pretrained_model_name_or_path, *model_args, config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, use_safetensors=use_safetensors, weights_only=weights_only, **kwargs, ) spk_path = cached_file( pretrained_model_name_or_path, "spk_dict.pt", subfolder=kwargs.pop("subfolder", None), cache_dir=kwargs.pop("cache_dir", None), force_download=kwargs.pop("force_download", False), proxies=kwargs.pop("proxies", None), resume_download=kwargs.pop("resume_download", None), 
local_files_only=kwargs.pop("local_files_only", False), token=kwargs.pop("use_auth_token", None), revision=kwargs.pop("revision", None), ) if spk_path is None: raise ValueError(f"""{pretrained_model_name_or_path}/{spk_path} not exists""") model.load_speakers(spk_path) return model @torch.no_grad() # TODO: raushan, defaults should be saved in generation config def generate( self, input_ids: Optional[torch.Tensor] = None, speaker: str = "Chelsie", use_audio_in_video: bool = False, return_audio: Optional[bool] = None, thinker_max_new_tokens: int = 1024, talker_max_new_tokens: int = 4096, talker_do_sample: bool = True, talker_top_k: int = 40, talker_top_p: float = 0.8, talker_temperature: float = 0.9, talker_eos_token_id: list[int] = [8292, 8294], talker_repetition_penalty: float = 1.05, **kwargs, ): r""" Generate text response and audio from input. Args: input_ids (`Optional[torch.Tensor]`, *optional*): Input ids, should obtain from processor. speaker (`str` , defaults to "Chelsie"): Which speaker should be used in audio response. use_audio_in_video (`bool`, defaults to False): Whether or not use audio track in video, should same as the parameter in `process_audio_info`. return_audio (`Optional[bool]`, *optional*): Whether or not return response in audio format. When `return_audio=None`, this parameter is same as `config.enable_audio_output`. kwargs (*optional*): - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. - With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the thinker, talker and token2wav respectively. It has the priority over the keywords without a prefix. Returns: When `return_audio=False`: - **Text** (`torch.Tensor`): Generated text token sequence. When `return_audio=True`: - **Text** (`torch.Tensor`): Generated text token sequence. - **Audio waveform** (`torch.Tensor`): Generated audio waveform. """ if speaker not in self.speaker_map: raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}") if return_audio and not self.has_talker: raise ValueError( "Cannot use talker when talker module not initialized. Use `enable_talker` method or set enable_talker in config to enable talker." 
) if return_audio is None: return_audio = self.has_talker if input_ids.shape[0] != 1 and return_audio: raise NotImplementedError("Qwen2.5-Omni currently does not support batched inference with audio output") shared_kwargs = {"use_audio_in_video": use_audio_in_video} thinker_kwargs = { "max_new_tokens": thinker_max_new_tokens, } talker_kwargs = { "max_new_tokens": talker_max_new_tokens, "do_sample": talker_do_sample, "top_k": talker_top_k, "top_p": talker_top_p, "temperature": talker_temperature, "eos_token_id": talker_eos_token_id, "repetition_penalty": talker_repetition_penalty, } token2wav_kwargs = {} for key, value in kwargs.items(): if key.startswith("thinker_"): thinker_kwargs[key[len("thinker_") :]] = value elif key.startswith("talker_"): talker_kwargs[key[len("talker_") :]] = value elif key.startswith("token2wav_"): token2wav_kwargs[key[len("token2wav_") :]] = value # Process special input values elif key == "feature_attention_mask": thinker_kwargs[key] = value talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1) elif key == "input_features" or key == "attention_mask": thinker_kwargs[key] = value # Put other key to shared kwargs else: shared_kwargs[key] = value # Merge kwargs for key, value in shared_kwargs.items(): if key not in thinker_kwargs: thinker_kwargs[key] = value if key not in talker_kwargs: talker_kwargs[key] = value if key not in token2wav_kwargs: token2wav_kwargs[key] = value speaker_params = self.speaker_map[speaker] # 1. Generate from thinker module generate_audio = return_audio and self.has_talker if generate_audio: thinker_kwargs["output_hidden_states"] = True thinker_kwargs["return_dict_in_generate"] = True thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs) if not generate_audio: return thinker_result # 2. 
Generate speech tokens from talker module embeds_to_talker = thinker_result.hidden_states[0][0].clone().to(input_ids.device) if thinker_kwargs.get("input_features") is not None: audio_ids_mask = input_ids == self.config.thinker_config.audio_token_index audio_mask = audio_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) audio_mask_tensor = torch.zeros( [audio_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(audio_mask, audio_mask_tensor) if thinker_kwargs.get("pixel_values") is not None: image_ids_mask = input_ids == self.config.thinker_config.image_token_index image_mask = image_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) image_mask_tensor = torch.zeros( [image_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(image_mask, image_mask_tensor) if thinker_kwargs.get("pixel_values_videos") is not None: video_ids_mask = input_ids == self.config.thinker_config.video_token_index video_mask = video_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker) video_mask_tensor = torch.zeros( [video_ids_mask.sum(), embeds_to_talker.shape[-1]], dtype=embeds_to_talker.dtype, device=input_ids.device, ) embeds_to_talker.masked_scatter_(video_mask, video_mask_tensor) processed_thinker_hidden = ( (embeds_to_talker,) + thinker_result.hidden_states[0][1:], ) + thinker_result.hidden_states[1:] thinker_generate_ids = thinker_result.sequences[:, input_ids.size(1) :].to(input_ids.device) thinker_token_embeds = [ token_hidden_states[0].to(input_ids.device) for token_hidden_states in processed_thinker_hidden ] thinker_hidden_states = [ token_hidden_states[-1].to(input_ids.device) for token_hidden_states in processed_thinker_hidden ] talker_text_bos_token = speaker_params["bos_token"] talker_input_text_ids = torch.cat( [ input_ids, torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device), thinker_generate_ids[:, :1], ], dim=-1, ) talker_input_ids = torch.cat( [ torch.full_like(input_ids, fill_value=self.talker.codec_mask_token), torch.tensor([[self.talker.codec_pad_token]], dtype=torch.long, device=input_ids.device), torch.tensor([[self.talker.codec_bos_token]], dtype=torch.long, device=input_ids.device), ], dim=1, ) thinker_embed_tokens = self.thinker.get_input_embeddings() thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) talker_inputs_embeds = thinker_hidden_states[0] + thinker_token_embeds[0] talker_text_bos_token = torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device) talker_text_bos_embed = thinker_embed_tokens(talker_text_bos_token).to(input_ids.device) talker_inputs_embeds = torch.cat( [ talker_inputs_embeds, talker_text_bos_embed, thinker_reply_part[:, :1, :], ], dim=1, ) eos_embedding = thinker_embed_tokens( torch.tensor([[self.talker.text_eos_token]], dtype=torch.long, device=input_ids.device) ) pad_embedding = thinker_embed_tokens( torch.tensor([[self.talker.text_pad_token]], dtype=torch.long, device=input_ids.device) ) thinker_reply_part = torch.cat( [ thinker_reply_part[:, 1:, :], eos_embedding, pad_embedding, ], dim=1, ) talker_attention_mask = None if "attention_mask" in kwargs: talker_attention_mask = torch.cat( [kwargs["attention_mask"], kwargs["attention_mask"].new_ones((1, 2))], dim=1 ).to(input_ids.device) talker_result = self.talker.generate( input_ids=talker_input_ids, input_text_ids=talker_input_text_ids, 
thinker_reply_part=thinker_reply_part, inputs_embeds=talker_inputs_embeds, attention_mask=talker_attention_mask, suppress_tokens=[self.talker.codec_bos_token], **{k: (v.to(input_ids.device) if torch.is_tensor(v) else v) for k, v in talker_kwargs.items()}, ) talker_generate_codes = talker_result[:, talker_input_ids.shape[1] : -1] # 3. Generate wavs from code if self.token2wav.dtype != torch.float: self.token2wav.float() wav = self.token2wav( talker_generate_codes.to(input_ids.device), conditioning=speaker_params["cond"].to(input_ids.device).float(), reference_mel=speaker_params["ref_mel"].to(input_ids.device).float(), **token2wav_kwargs, ) return thinker_result.sequences, wav.float() __all__ = [ "Qwen2_5OmniConfig", "Qwen2_5OmniThinkerConfig", "Qwen2_5OmniTalkerConfig", "Qwen2_5OmniToken2WavConfig", "Qwen2_5OmniForConditionalGeneration", "Qwen2_5OmniThinkerTextModel", "Qwen2_5OmniThinkerForConditionalGeneration", "Qwen2_5OmniTalkerModel", "Qwen2_5OmniTalkerForConditionalGeneration", "Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel", "Qwen2_5OmniToken2WavModel", "Qwen2_5OmniPreTrainedModel", "Qwen2_5OmniPreTrainedModelForConditionalGeneration", ]
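The `RungeKutta4ODESolver` defined above implements the classical 3/8-rule Runge-Kutta scheme that `Qwen2_5OmniToken2WavDiTModel.sample` uses to integrate the flow-matching ODE along the (optionally sway-warped) timestep schedule. As a rough sanity check, here is a minimal sketch of how `integrate` behaves on a toy ODE, assuming the solver class is imported from this module:

```python
import torch

# dy/dt = -y with y(0) = 1 has the exact solution y(t) = exp(-t).
solver = RungeKutta4ODESolver(function=lambda t, y: -y, initial_value=torch.tensor([1.0]))

time_points = torch.linspace(0.0, 1.0, steps=11)
trajectory = solver.integrate(time_points)  # shape: (len(time_points), 1)

print(trajectory[-1].item())                 # ~0.367879, the RK4 estimate of y(1)
print(torch.exp(torch.tensor(-1.0)).item())  # 0.367879..., the exact value
```

In `sample`, the same call pattern is used with `ode_function` closing over the DiT forward pass and a `torch.linspace` schedule that is warped by the `sway_coefficient` term before integration.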
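Since `Qwen2_5OmniForConditionalGeneration.generate` routes keyword arguments by prefix (`thinker_*`, `talker_*`, `token2wav_*`), selects a voice via `speaker`, and returns `(text_ids, waveform)` when `return_audio=True`, a short end-to-end usage sketch may help. Note the hedges: the checkpoint id `Qwen/Qwen2.5-Omni-7B`, the `Qwen2_5OmniProcessor` chat-template call, and the 24 kHz output rate are assumptions about the released checkpoints, not something this file defines; only the `generate` keywords themselves come from the code above.

```python
# Minimal usage sketch; checkpoint id and processor API are assumptions (see above).
import torch
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor

model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-Omni-7B",  # assumed checkpoint name
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")

conversation = [
    {"role": "user", "content": [{"type": "text", "text": "Introduce yourself in one sentence."}]},
]
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
inputs = processor(text=text, return_tensors="pt").to(model.device)

# Prefixed kwargs are forwarded to the matching sub-model's generate call;
# `speaker` must be a key of the speaker map loaded from spk_dict.pt.
text_ids, waveform = model.generate(
    **inputs,
    speaker="Chelsie",
    return_audio=True,
    thinker_max_new_tokens=256,
    talker_do_sample=True,
)

print(processor.batch_decode(text_ids, skip_special_tokens=True)[0])
print(waveform.shape)  # 1-D waveform tensor, assumed 24 kHz sample rate
```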
transformers/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py/0
{ "file_path": "transformers/src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py", "repo_id": "transformers", "token_count": 87176 }
# coding=utf-8 # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Qwen2-VL.""" import math from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import ( convert_to_rgb, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, logging from ...video_utils import VideoInput, make_batched_videos logger = logging.get_logger(__name__) def smart_resize( height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280 ): """Rescales the image so that the following conditions are met: 1. Both dimensions (height and width) are divisible by 'factor'. 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. 3. The aspect ratio of the image is maintained as closely as possible. """ if max(height, width) / min(height, width) > 200: raise ValueError( f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}" ) h_bar = round(height / factor) * factor w_bar = round(width / factor) * factor if h_bar * w_bar > max_pixels: beta = math.sqrt((height * width) / max_pixels) h_bar = max(factor, math.floor(height / beta / factor) * factor) w_bar = max(factor, math.floor(width / beta / factor) * factor) elif h_bar * w_bar < min_pixels: beta = math.sqrt(min_pixels / (height * width)) h_bar = math.ceil(height * beta / factor) * factor w_bar = math.ceil(width * beta / factor) * factor return h_bar, w_bar class Qwen2VLImageProcessor(BaseImageProcessor): r""" Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}`): Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. 
do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. min_pixels (`int`, *optional*, defaults to `56 * 56`): The min pixels of the image to resize the image. max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`): The max pixels of the image to resize the image. patch_size (`int`, *optional*, defaults to 14): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to 2): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder. """ model_input_names = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, min_pixels: Optional[int] = None, max_pixels: Optional[int] = None, patch_size: int = 14, temporal_patch_size: int = 2, merge_size: int = 2, **kwargs, ) -> None: super().__init__(**kwargs) if size is not None and ("shortest_edge" not in size or "longest_edge" not in size): raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") else: size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280} # backward compatibility: override size with min_pixels and max_pixels if they are provided if min_pixels is not None: size["shortest_edge"] = min_pixels if max_pixels is not None: size["longest_edge"] = max_pixels self.min_pixels = size["shortest_edge"] self.max_pixels = size["longest_edge"] self.size = size self.do_resize = do_resize self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.patch_size = patch_size self.temporal_patch_size = temporal_patch_size self.merge_size = merge_size self.do_convert_rgb = do_convert_rgb def _preprocess( self, images: Union[ImageInput, VideoInput], do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, patch_size: Optional[int] = None, temporal_patch_size: Optional[int] = None, merge_size: Optional[int] = None, do_convert_rgb: Optional[bool] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess an image or batch of images. 
Copy of the `preprocess` method from `CLIPImageProcessor`. Args: images (`ImageInput`): Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`. vision_info (`list[Dict]`, *optional*): Optional list of dictionaries containing additional information about vision inputs. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image. patch_size (`int`, *optional*, defaults to `self.patch_size`): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to `self.merge_size`): The merge size of the vision encoder to llm encoder. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ images = make_list_of_images(images) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) height, width = get_image_size(images[0], channel_dim=input_data_format) resized_height, resized_width = height, width processed_images = [] for image in images: if do_resize: resized_height, resized_width = smart_resize( height, width, factor=patch_size * merge_size, min_pixels=size["shortest_edge"], max_pixels=size["longest_edge"], ) image = resize( image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) processed_images.append(image) patches = np.array(processed_images) if data_format == ChannelDimension.LAST: patches = patches.transpose(0, 3, 1, 2) if patches.shape[0] % temporal_patch_size != 0: repeats = np.repeat( patches[-1][np.newaxis], temporal_patch_size - (patches.shape[0] % temporal_patch_size), axis=0 ) patches = np.concatenate([patches, repeats], axis=0) channel = patches.shape[1] grid_t = patches.shape[0] // temporal_patch_size grid_h, grid_w = resized_height // patch_size, resized_width // patch_size patches = patches.reshape( grid_t, temporal_patch_size, channel, grid_h // merge_size, merge_size, patch_size, grid_w // merge_size, merge_size, patch_size, ) patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8) flatten_patches = patches.reshape( grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size ) return flatten_patches, (grid_t, grid_h, grid_w) def preprocess( self, images: ImageInput, videos: VideoInput = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, min_pixels: Optional[int] = None, max_pixels: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, patch_size: Optional[int] = None, temporal_patch_size: Optional[int] = None, merge_size: Optional[int] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. videos (`VideoInput`): Video to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If passing in videos with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. 
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. min_pixels (`int`, *optional*, defaults to `self.min_pixels`): The min pixels of the image to resize the image. max_pixels (`int`, *optional*, defaults to `self.max_pixels`): The max pixels of the image to resize the image. patch_size (`int`, *optional*, defaults to `self.patch_size`): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to `self.merge_size`): The merge size of the vision encoder to llm encoder. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" min_pixels = min_pixels if min_pixels is not None else self.min_pixels max_pixels = max_pixels if max_pixels is not None else self.max_pixels if size is not None: if "shortest_edge" not in size or "longest_edge" not in size: raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.") min_pixels = size["shortest_edge"] elif min_pixels is not None and max_pixels is not None: # backward compatibility: override size with min_pixels and max_pixels if they are provided size = {"shortest_edge": min_pixels, "longest_edge": max_pixels} else: size = {**self.size} do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std patch_size = patch_size if patch_size is not None else self.patch_size temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size merge_size = merge_size if merge_size is not None else self.merge_size do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb if images is not None: images = make_flat_list_of_images(images) if images is not None and not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) data = {} if images is not None: pixel_values, vision_grid_thws = [], [] for image in images: patches, image_grid_thw = self._preprocess( image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, patch_size=patch_size, temporal_patch_size=temporal_patch_size, merge_size=merge_size, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, ) pixel_values.extend(patches) vision_grid_thws.append(image_grid_thw) pixel_values = np.array(pixel_values) vision_grid_thws = np.array(vision_grid_thws) data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws}) # kept for BC only and should be removed after v5.0 if videos is not None: logger.warning( "`Qwen2VLImageProcessor` works only with image inputs and doesn't process videos anymore. " "This is a deprecated behavior and will be removed in v5.0. " "Your videos should be forwarded to `Qwen2VLVideoProcessor`. 
" ) videos = make_batched_videos(videos) pixel_values_videos, vision_grid_thws_videos = [], [] for images in videos: patches, video_grid_thw = self._preprocess( images, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, patch_size=patch_size, temporal_patch_size=temporal_patch_size, merge_size=merge_size, data_format=data_format, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, ) pixel_values_videos.extend(patches) vision_grid_thws_videos.append(video_grid_thw) data.update( { "pixel_values_videos": np.array(pixel_values_videos), "video_grid_thw": np.array(vision_grid_thws_videos), } ) return BatchFeature(data=data, tensor_type=return_tensors) def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): """ A utility that returns number of image patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of image patches per image. """ min_pixels = images_kwargs["min_pixels"] if "min_pixels" in images_kwargs else self.size["shortest_edge"] max_pixels = images_kwargs["max_pixels"] if "max_pixels" in images_kwargs else self.size["longest_edge"] patch_size = images_kwargs.get("patch_size", self.patch_size) merge_size = images_kwargs.get("merge_size", self.merge_size) factor = patch_size * merge_size resized_height, resized_width = smart_resize( height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels ) grid_h, grid_w = resized_height // patch_size, resized_width // patch_size return grid_h * grid_w __all__ = ["Qwen2VLImageProcessor"]
transformers/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py/0
{ "file_path": "transformers/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py", "repo_id": "transformers", "token_count": 11525 }
543
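As a quick usage sketch for the Qwen2-VL image processor defined above: the snippet below feeds a dummy NumPy image through `preprocess` and inspects the flattened patches and the `(grid_t, grid_h, grid_w)` metadata. The random input and the chosen `min_pixels`/`max_pixels` values are illustrative assumptions; output shapes follow from `smart_resize` and the patching logic shown earlier.

```python
import numpy as np
from transformers import Qwen2VLImageProcessor

processor = Qwen2VLImageProcessor(min_pixels=56 * 56, max_pixels=28 * 28 * 1280)

# Dummy HWC RGB image with uint8 pixel values in [0, 255].
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)

features = processor.preprocess(images=image, return_tensors="np")
print(features["pixel_values"].shape)  # (grid_t * grid_h * grid_w, channels * temporal_patch_size * patch_size**2)
print(features["image_grid_thw"])      # [[grid_t, grid_h, grid_w]] for the single image
```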
# coding=utf-8 # Copyright 2020, The RAG Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TFRAG model implementation.""" from __future__ import annotations import copy from dataclasses import dataclass import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...generation import TFLogitsProcessorList from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, keras, shape_list, unpack_inputs, ) from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "RagConfig" @dataclass class TFRetrievAugLMMarginOutput(ModelOutput): """ Base class for retriever augmented marginalized models outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss. logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`list[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor`(int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. 
question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None past_key_values: list[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: tuple[tf.Tensor, ...] | None = None question_enc_attentions: tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: tuple[tf.Tensor, ...] 
| None = None generator_enc_attentions: tuple[tf.Tensor, ...] | None = None generator_dec_hidden_states: tuple[tf.Tensor, ...] | None = None generator_dec_attentions: tuple[tf.Tensor, ...] | None = None @dataclass class TFRetrievAugLMOutput(ModelOutput): """ Args: logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head. The score is possibly marginalized over all documents for each vocabulary token. past_key_values (`list[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used (see `past_key_values` input) to speed up sequential decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*): Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute the `doc_scores`. retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*): The indexes of the embedded documents retrieved by the retriever. context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden states at the output of the last layer of the question encoder pooled output of the model. question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the question encoder at the output of each layer plus the initial embedding outputs. question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the question encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the generator encoder of the model. 
generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs. generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs. generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor | None = None past_key_values: list[tf.Tensor] | None = None doc_scores: tf.Tensor | None = None retrieved_doc_embeds: tf.Tensor | None = None retrieved_doc_ids: tf.Tensor | None = None context_input_ids: tf.Tensor | None = None context_attention_mask: tf.Tensor | None = None question_encoder_last_hidden_state: tf.Tensor | None = None question_enc_hidden_states: tuple[tf.Tensor, ...] | None = None question_enc_attentions: tuple[tf.Tensor, ...] | None = None generator_enc_last_hidden_state: tf.Tensor | None = None generator_enc_hidden_states: tuple[tf.Tensor, ...] | None = None generator_enc_attentions: tuple[tf.Tensor, ...] | None = None generator_dec_hidden_states: tuple[tf.Tensor, ...] | None = None generator_dec_attentions: tuple[tf.Tensor, ...] | None = None class TFRagPreTrainedModel(TFPreTrainedModel): r""" RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://huggingface.co/papers/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al. RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a generator, the encoder and generator are trainable while the retriever is just an indexed dataset. """ config_class = RagConfig base_model_prefix = "rag" _keys_to_ignore_on_load_missing = [r"position_ids"] @classmethod def from_pretrained_question_encoder_generator( cls, question_encoder_pretrained_model_name_or_path: str | None = None, generator_pretrained_model_name_or_path: str | None = None, retriever: RagRetriever = None, *model_args, **kwargs, ) -> TFPreTrainedModel: r""" Instantiates an question encoder and a generator from one or two base classes of the library from pretrained model checkpoints. Params: question_encoder_pretrained_model_name_or_path (`str`, *optional*): Information necessary to initiate the question encoder. 
Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-bert/bert-base-uncased`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `dbmdz/bert-base-german-cased`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case, `question_encoder_from_pt` should be set to `True`. generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the generator. Can be either: - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g., `google-t5/t5-small`. - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g., `facebook/bart-base`. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case, `generator_from_pt` should be set to `True`. model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. retriever ([`RagRetriever`], *optional*): The retriever to use. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the question_encoder configuration, use the prefix *question_encoder_* for each configuration parameter. - To update the generator configuration, use the prefix *generator_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import RagRetriever, TFRagModel >>> # initialize a RAG from two pretrained models. >>> model = TFRagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small" ... ) >>> # alternatively, initialize from pytorch pretrained models can also be done >>> model = TFRagModel.from_pretrained_question_encoder_generator( ... "facebook/dpr-question_encoder-single-nq-base", ... "facebook/bart-base", ... generator_from_pt=True, ... question_encoder_from_pt=True, ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./rag") >>> # load retriever >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... 
) >>> # load fine-tuned model with retriever >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever) ```""" kwargs_question_encoder = { argument[len("question_encoder_") :]: value for argument, value in kwargs.items() if argument.startswith("question_encoder_") } kwargs_generator = { argument[len("generator_") :]: value for argument, value in kwargs.items() if argument.startswith("generator_") } # remove question_encoder, generator kwargs from kwargs for key in kwargs_question_encoder: del kwargs["question_encoder_" + key] for key in kwargs_generator: del kwargs["generator_" + key] # Load and initialize the question_encoder and generator # The distinction between question_encoder and generator at the model level is made # by the value of the flag `is_generator` that we need to set correctly. question_encoder = kwargs_question_encoder.pop("model", None) if question_encoder is None: assert question_encoder_pretrained_model_name_or_path is not None, ( "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to" " be defined" ) from ..auto.modeling_tf_auto import TFAutoModel if "config" not in kwargs_question_encoder: from ..auto.configuration_auto import AutoConfig question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path) kwargs_question_encoder["config"] = question_encoder_config question_encoder = TFAutoModel.from_pretrained( question_encoder_pretrained_model_name_or_path, name="question_encoder", load_weight_prefix=cls.load_weight_prefix, *model_args, **kwargs_question_encoder, ) generator = kwargs_generator.pop("generator", None) if generator is None: assert generator_pretrained_model_name_or_path is not None, ( "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has" " to be defined" ) from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM if "config" not in kwargs_generator: from ..auto.configuration_auto import AutoConfig generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path) kwargs_generator["config"] = generator_config generator = TFAutoModelForSeq2SeqLM.from_pretrained( generator_pretrained_model_name_or_path, name="generator", load_weight_prefix=cls.load_weight_prefix, **kwargs_generator, ) # instantiate config with corresponding kwargs config = kwargs.get("config") if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever) RAG_START_DOCSTRING = r""" RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator. During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant context documents. The documents are then prepended to the input. Such contextualized inputs is passed to the generator. The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can be any *seq2seq* model, preferably [`TFBartForConditionalGeneration`]. The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the outputs of a retriever in multiple steps---see examples for more details. The model is compatible any *autoencoding* model as the `question_encoder` and any *seq2seq* model with language model head as the `generator`. 
It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and [`TFBartForConditionalGeneration`] as the `generator`. This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. The model is in a developing state as it is now fully supports in eager-mode only, and may not be exported in SavedModel format. Args: config ([`RagConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. question_encoder ([`TFPreTrainedModel`]): An encoder model compatible with the faiss index encapsulated by the `retriever`. generator ([`TFPreTrainedModel`]): A seq2seq model used as the generator in the RAG architecture. retriever ([`RagRetriever`]): A retriever class encapsulating a faiss index queried to obtain context documents for current inputs. """ RAG_FORWARD_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to obtain the indices. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*) Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`, *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs * sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the generator's encoder. Used by the ([`TFRagModel`]) model during decoding. decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for generation tasks. `None` by default, construct as per instructions for the generator model you're using with your RAG instance. decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. past_key_values (`tuple(tuple(tf.Tensor))`): Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are used in the ([`RagTokenForGeneration`]) model during decoding. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores` has to be provided to the forward pass. 
`doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information. context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever` ``context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`]. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_retrieved(`bool`, *optional*): Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask`. See returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple. n_docs (`int`, *optional*, defaults to `config.n_docs``) Number of documents to retrieve and/or number of documents for which to generate an answer. """ @add_start_docstrings_to_model_forward(RAG_START_DOCSTRING) class TFRagModel(TFRagPreTrainedModel): load_weight_prefix = "tf_rag_model_1" def __init__( self, config: PretrainedConfig | None = None, question_encoder: TFPreTrainedModel | None = None, generator: TFPreTrainedModel | None = None, retriever: RagRetriever | None = None, load_weight_prefix: str | None = None, **kwargs, ): assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an question_encoder and a generator has to be provided." 
) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) else: assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}" super().__init__(config, **kwargs) if question_encoder is None: from ..auto.modeling_tf_auto import TFAutoModel question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder") if generator is None: from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix generator = TFAutoModelForSeq2SeqLM.from_config( config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator" ) self.retriever = retriever if self.retriever is not None: assert isinstance(retriever, RagRetriever), ( f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`" ) self.retriever = retriever self.question_encoder = question_encoder self.generator = generator def set_retriever(self, retriever: RagRetriever): self.retriever = retriever @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, ) -> TFRetrievAugLMOutput: r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... 
) >>> input_ids = input_dict["input_ids"] >>> outputs = model(input_ids) ```""" assert "decoder_cached_states" not in kwargs, ( "Please use past_key_values to cache intermediate outputs" ) # from modeling_tf_bart.py # aliasing to minimize code changing n_docs = n_docs if n_docs is not None else self.config.n_docs # whether retriever has to be used has_to_retrieve = ( self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and encoder_outputs is None ) # encoder_outputs are pre-computed during RAG-token generation if encoder_outputs is None: if has_to_retrieve: question_enc_outputs = self.question_encoder( input_ids, attention_mask=attention_mask, return_dict=True, training=training ) # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91 question_encoder_last_hidden_state = question_enc_outputs[ 0 ] # hidden states of question encoder => pooler_output retriever_outputs = self.retriever( input_ids, question_encoder_last_hidden_state.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = ( retriever_outputs["context_input_ids"], retriever_outputs["context_attention_mask"], retriever_outputs["retrieved_doc_embeds"], retriever_outputs["doc_ids"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32) # compute doc_scores doc_scores = tf.squeeze( tf.matmul( tf.expand_dims(question_encoder_last_hidden_state, axis=1), retrieved_doc_embeds, transpose_b=True, ), axis=1, ) else: assert context_input_ids is not None, ( "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can" " set a retriever using the `set_retriever(...)` function." ) assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function." ) assert (doc_scores.shape[1] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." 
) # Decoder input without context documents if decoder_input_ids is not None: decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0) if decoder_attention_mask is not None: decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0) gen_outputs = self.generator( context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, return_dict=True, training=training, ) if not has_to_retrieve: question_encoder_last_hidden_state = None question_enc_hidden_states = None question_enc_attentions = None retrieved_doc_embeds = None retrieved_doc_ids = None else: question_enc_hidden_states = question_enc_outputs.hidden_states question_enc_attentions = question_enc_outputs.attentions if not has_to_retrieve or not output_retrieved: # don't output retrieved docs context_input_ids = (None,) context_attention_mask = None retrieved_doc_embeds = None retrieved_doc_ids = None return TFRetrievAugLMOutput( logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope(self.generator.name): self.generator.build(None) with tf.name_scope(self.question_encoder.name): self.question_encoder.build(None) @add_start_docstrings_to_model_forward( """ A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_token_for_generation_1/rag" def __init__( self, config: PretrainedConfig | None = None, question_encoder: TFPreTrainedModel | None = None, generator: TFPreTrainedModel | None = None, retriever: RagRetriever | None = None, **kwargs, ): assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an encoder and a generator has to be provided." 
) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs, ): if past_key_values is not None: # if past is defined use only last decoder_input_ids decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, "encoder_outputs": encoder_outputs, "doc_scores": doc_scores, "context_attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "past_key_values": past_key_values, "use_cache": use_cache, "do_marginalize": True, "n_docs": n_docs, } @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @staticmethod def _gather_beams(nested, beam_indices, batch_axis=0): """ RAG-specific `_gather_beams`: gathers the beam slices indexed by beam_indices into new beam array. If the nested tensor has a shape mismatch with the beam indices, then it means it is the cache. In that case, isolates and takes care of the extra dimension for ndocs. """ def gather_fn(tensor): is_rag_cache = tensor.shape[0] != beam_indices.shape[0] if is_rag_cache: n_docs = tensor.shape[0] // beam_indices.shape[0] batch_size = beam_indices.shape[0] # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:])) gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) if is_rag_cache: # reshapes back into the shape expected by beam search gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:])) return gathered_tensor return tf.nest.map_structure(gather_fn, nested) def marginalize(self, seq_logits, doc_scores, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # RAG-token marginalization seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]]) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # twice log_prob_sum = seq_logprobs + doc_logprobs return tf.reduce_logsumexp(log_prob_sum, axis=1) @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, 
context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, do_marginalize: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> TFRetrievAugLMMarginOutput: r""" do_marginalize (`bool`, *optional*): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Token model formulation See https://huggingface.co/papers/2005.11401 Section 2.1 for details about Rag-Token formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... 
) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert "decoder_cached_states" not in kwargs, ( "Please use past_key_values to cache intermediate outputs" ) # from modeling_tf_bart.py do_marginalize = do_marginalize if do_marginalize else self.config.do_marginalize reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None logits = outputs.logits if labels is not None: assert decoder_input_ids is not None loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) if do_marginalize: logits = self.marginalize(logits, outputs.doc_scores, n_docs) return TFRetrievAugLMMarginOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, doc_scores=outputs.doc_scores, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, n_docs=None, generation_config=None, logits_processor=TFLogitsProcessorList(), **kwargs, ): """ Implements TFRAG token decoding. Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`]. 
context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embedding (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. logits_processor (`TFLogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and a model's config. If a logits processor that is already created with the arguments or a model's config is passed, an error is thrown. kwargs (`dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" # Handle `generation_config` and kwargs that might update it if generation_config is None: generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs # set default parameters n_docs = n_docs if n_docs is not None else self.config.n_docs # retrieve docs if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] out = self.retriever( input_ids, question_hidden_states.numpy().astype(np.float32), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", ) context_input_ids, context_attention_mask, retrieved_doc_embeds = ( out["context_input_ids"], out["context_attention_mask"], out["retrieved_doc_embeds"], ) context_input_ids = tf.cast(context_input_ids, tf.int32) context_attention_mask = tf.cast(context_attention_mask, tf.int32) retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32) # compute doc_scores doc_scores = tf.matmul( tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True ) doc_scores = tf.squeeze(doc_scores, axis=1) assert (context_input_ids.shape[0] % n_docs) == 0, ( f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is" f" {context_input_ids.shape[0]}." ) batch_size = context_input_ids.shape[0] // n_docs encoder = self.rag.generator.get_encoder() encoder_outputs = encoder( input_ids=context_input_ids, attention_mask=context_attention_mask, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, return_dict=True, ) decoder_input_ids = tf.fill( (batch_size * generation_config.num_beams, 1), tf.cast(generation_config.decoder_start_token_id, tf.int32), ) last_hidden_state = encoder_outputs["last_hidden_state"] def extend_enc_output(tensor, num_beams=None): """ Broadcast tensor with `num_beams` replica, with correct order Input: tensor of shape (batch_size*n_docs , d) Output: tensor of shape (batch_size*num_beams*n_docs , d) """ # expand batch_size & num_beam dimensions d_shape_list = tensor.shape[1:] # split n_docs dimensions new_shape = (batch_size, 1, n_docs) + d_shape_list tensor = tf.reshape(tensor, new_shape) # repeat same last hidden states over `num_beams` dimension new_shape = (batch_size, num_beams, n_docs) + d_shape_list tensor = tf.broadcast_to(tensor, new_shape) # merge `batch_size`, `num_beams`, `num_docs` dims again new_shape = (batch_size * num_beams * n_docs,) + d_shape_list return tf.reshape(tensor, new_shape) # correctly extend last_hidden_state and attention mask context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams) encoder_outputs["last_hidden_state"] = extend_enc_output( last_hidden_state, num_beams=generation_config.num_beams ) doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0) # define start_len & additional parameters model_kwargs["doc_scores"] = doc_scores model_kwargs["encoder_outputs"] = encoder_outputs model_kwargs["attention_mask"] = context_attention_mask model_kwargs["n_docs"] = n_docs pre_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=tf.shape(decoder_input_ids)[-1], logits_processor=logits_processor, ) if generation_config.num_beams == 1: return self.greedy_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, 
pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) elif generation_config.num_beams > 1: if generation_config.num_beams < generation_config.num_return_sequences: raise ValueError( "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" f" num_return_sequences, got {generation_config.num_beams} and" f" {generation_config.num_return_sequences} (respectively)" ) def unflatten_beam_dim(tensor): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" shape = shape_list(tensor) return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:]) decoder_input_ids = unflatten_beam_dim(decoder_input_ids) model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"]) model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) return self.beam_search( input_ids=decoder_input_ids, max_length=generation_config.max_length, pad_token_id=generation_config.pad_token_id, eos_token_id=generation_config.eos_token_id, logits_processor=pre_processor, output_attentions=generation_config.output_attentions, output_hidden_states=generation_config.output_hidden_states, output_scores=generation_config.output_scores, return_dict_in_generate=generation_config.return_dict_in_generate, **model_kwargs, ) else: raise ValueError( f"`num_beams` has to be an integer strictly superior to 0 (≥ 1), but is {generation_config.num_beams}" ) def get_input_embeddings(self): return self.rag.generator.get_input_embeddings() def get_output_embeddings(self): return self.rag.generator.get_output_embeddings() # Adapted from tf_t5's & tf_bart's _shift_right def shift_tokens_right(self, input_ids, start_token_id=None): """Shift input ids one token to the right, and pad with start_token_id""" if start_token_id is None: start_token_id = self.generator.config.decoder_start_token_id assert start_token_id is not None, ( "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as" " generator, see Bart docs for more information" ) pad_token_id = self.generator.config.pad_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
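 # Added illustrative comment (hypothetical values, assuming start_token_id=0 and pad_token_id=1): # labels [[5, -100, -100]] -> prepend the start token and drop the last position -> [[0, 5, -100]], # then every remaining -100 (the loss-ignore index) is replaced with pad_token_id -> [[0, 5, 1]]. 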
start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)), shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids # nll stands for 'negative log likelihood' def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None): n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left (from original Pytorch's version) target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss) return loss # Adopted modeling_tf_bart + add smooth_loss to match with pytorch version def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False): """CrossEntropyLoss that ignores pad tokens""" # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things # and I don't feel comfortable converting it. loss_fn = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.SUM, ) if from_logits is False: # convert to logits eps = 1e-9 y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps) y_pred = tf.math.log(y_pred) logits = y_pred melted_labels = tf.reshape(labels, (-1,)) active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id) reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss) labels = tf.boolean_mask(melted_labels, active_loss) nll_loss = loss_fn(labels, reduced_logits) smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1) smooth_loss = tf.reduce_sum(smooth_loss) # sum and squeeze like torch eps_i = smooth_epsilon / reduced_logits.shape[-1] loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss return loss def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) @add_start_docstrings_to_model_forward( """ A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass. """, RAG_START_DOCSTRING, ) class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss): load_weight_prefix = "tf_rag_sequence_for_generation_1/rag" def __init__( self, config: PretrainedConfig | None = None, question_encoder: TFPreTrainedModel | None = None, generator: TFPreTrainedModel | None = None, retriever: RagRetriever | None = None, **kwargs, ): assert config is not None or (question_encoder is not None and generator is not None), ( "Either a configuration or an encoder and a generator has to be provided." 
) if config is None: config = RagConfig.from_question_encoder_generator_configs( question_encoder.config, generator.config, **kwargs ) super().__init__(config) # instantiate model self.rag = TFRagModel( config=config, question_encoder=question_encoder, generator=generator, retriever=retriever, load_weight_prefix=self.load_weight_prefix, name="rag", ) def set_retriever(self, retriever: RagRetriever): self.rag.retriever = retriever @property def retriever(self): return self.rag.retriever @property def generator(self): return self.rag.generator @property def question_encoder(self): return self.rag.question_encoder @unpack_inputs @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, decoder_input_ids: np.ndarray | tf.Tensor | None = None, decoder_attention_mask: np.ndarray | tf.Tensor | None = None, encoder_outputs: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, doc_scores: np.ndarray | tf.Tensor | None = None, context_input_ids: np.ndarray | tf.Tensor | None = None, context_attention_mask: np.ndarray | tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, output_retrieved: bool | None = None, n_docs: int | None = None, exclude_bos_score: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, reduce_loss: bool | None = None, return_dict: bool | None = None, training: bool = False, **kwargs, # needs kwargs for generation ) -> tuple[tf.Tensor] | TFRetrievAugLMMarginOutput: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing the loss. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss according to Rag-Sequence model formulation See https://huggingface.co/papers/2005.11401 Section 2.1 for details about Rag-Sequence formulation. Indices should be in `[0, ..., config.vocab_size - 1]`. reduce_loss (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum` operation. kwargs (`dict[str, any]`, *optional*, defaults to `{}`): Legacy dictionary, which is required so that model can use *generate()* function. Returns: Example: ```python >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq") >>> retriever = RagRetriever.from_pretrained( ... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True ... ) >>> # initialize with RagRetriever to do everything in one forward call >>> model = TFRagSequenceForGeneration.from_pretrained( ... "facebook/rag-sequence-nq", retriever=retriever, from_pt=True ... ) >>> input_dict = tokenizer.prepare_seq2seq_batch( ... "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf" ... ) >>> outputs = model(input_dict, output_retrieved=True) >>> # or use retriever separately >>> # 1. Encode >>> input_ids = input_dict["input_ids"] >>> question_hidden_states = model.question_encoder(input_ids)[0] >>> # 2. 
Retrieve >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf") >>> doc_scores = tf.squeeze( ... tf.matmul( ... tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True ... ), ... axis=1, ... ) >>> # 3. Forward to generator >>> outputs = model( ... inputs=None, ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... decoder_input_ids=input_dict["labels"], ... ) >>> # or directly generate >>> generated = model.generate( ... context_input_ids=docs_dict["context_input_ids"], ... context_attention_mask=docs_dict["context_attention_mask"], ... doc_scores=doc_scores, ... ) >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True) ```""" assert "decoder_cached_states" not in kwargs, ( "Please use past_key_values to cache intermediate outputs" ) # from modeling_tf_bart.py exclude_bos_score = exclude_bos_score if exclude_bos_score else self.config.exclude_bos_score reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss if labels is not None: if decoder_input_ids is None: decoder_input_ids = labels use_cache = False outputs = self.rag( input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs, training=training, ) loss = None if labels is not None: loss = self.get_nll( outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs, ) return TFRetrievAugLMMarginOutput( loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, ) def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left target = tf.concat( [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], axis=1, ) # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id n_docs = n_docs if n_docs is not None else self.config.n_docs equal_bos_token_id_all = tf.reduce_all(tf.equal(target[:, 0], bos_token_id)) use_bos = bos_token_id is not None and equal_bos_token_id_all def _mask_pads(ll, smooth_obj): pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype)) if tf.reduce_any(pad_mask): ll = tf.where(pad_mask, 0.0, ll) smooth_obj = 
tf.where(pad_mask, 0.0, smooth_obj) return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1) # seq_logits.shape = (batch*n_docs, tgt_len , vocabs) seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1) seq_logprobs = tf.reshape( seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]) ) # (batch_size, n_docs, tgt_len, vocabs) doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1) # done twice to get 4-D # RAG-sequence marginalization first_token_scores = seq_logprobs[:, :, :1, :] second_token_scores = seq_logprobs[:, :, 1:2, :] remainder = seq_logprobs[:, :, 2:, :] rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2) # calculate loss target = tf.expand_dims(target, axis=1) # n_docs dimension target = tf.expand_dims(target, axis=-1) # logits dimension target = tf.repeat(target, n_docs, axis=1) assert len(target.shape) == len(rag_logprobs.shape) # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering def torch_gather(param, id_tensor): # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather def gather2d(target, id_tensor): idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1) result = tf.gather_nd(target, idx) return tf.expand_dims(result, axis=-1) target = tf.reshape(param, (-1, param.shape[-1])) # reshape 2D target_shape = id_tensor.shape id_tensor = tf.reshape(id_tensor, (-1, 1)) # also 2D-index result = gather2d(target, id_tensor) return tf.reshape(result, target_shape) ll = torch_gather(rag_logprobs, id_tensor=target) smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True) # total sum of all (normalised) logits ll, smooth_obj = _mask_pads(ll, smooth_obj) # sum over tokens, exclude bos while scoring if exclude_bos_score and use_bos: ll = tf.reduce_sum(ll[:, :, 1:], axis=2) else: ll = tf.reduce_sum(ll, axis=2) smooth_obj = tf.reduce_sum(smooth_obj, axis=2) ll = tf.math.reduce_logsumexp(ll, axis=1) # logsumexp over docs smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1) nll_loss = -ll smooth_loss = -smooth_obj if reduce_loss: nll_loss = tf.reduce_sum(nll_loss) smooth_loss = tf.reduce_sum(smooth_loss) eps_i = epsilon / rag_logprobs.shape[-1] loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss def generate( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, context_input_ids=None, context_attention_mask=None, doc_scores=None, do_deduplication=None, # defaults to True num_return_sequences=None, # defaults to 1 num_beams=None, # defaults to 1 n_docs=None, **model_kwargs, ): """ Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation for more information on how to set other generate input parameters Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `input_ids` is not passed, then `context_input_ids` has to be provided. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*): Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the retriever. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are returned by [`~RagRetriever.__call__`]. doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`): Score between each retrieved document embedding (see `retrieved_doc_embeds`) and `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`]. do_deduplication (`bool`, *optional*): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with a distributed backend. num_return_sequences (`int`, *optional*, defaults to 1): The number of independently computed returned sequences for each element in the batch. Note that this is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function, where we set `num_return_sequences` to `num_beams`. num_beams (`int`, *optional*, defaults to 1): Number of beams for beam search. 1 means no beam search. n_docs (`int`, *optional*, defaults to `config.n_docs`): Number of documents to retrieve and/or number of documents for which to generate an answer. kwargs (`dict[str, Any]`, *optional*): Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]. Return: `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early due to the `eos_token_id`. 
""" n_docs = n_docs if n_docs is not None else self.config.n_docs do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication num_doc_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) num_beams = num_beams if num_beams is not None else self.config.num_beams assert input_ids is not None or context_input_ids is not None, ( " At least one of input_ids or context_input_ids must be given" ) if self.retriever is not None and context_input_ids is None: question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0] context_input_ids = self.retriever( input_ids, question_hidden_states.numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors="tf", )["context_input_ids"] hypos = [] model_kwargs["num_beams"] = num_beams model_kwargs["num_return_sequences"] = num_beams # put here so that not confused with num_doc_return_sequences model_kwargs["attention_mask"] = None batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs for index in range(batch_size): # first, generate beams from documents: generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs] # (n_docs, max_len) output_sequences = self.generator.generate( generator_input_ids, **model_kwargs, ) # n_docs * n_beam, tgt_len if do_deduplication: # do_deduplication -- for TF, work on Eager mode only! output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values())) num_candidates = output_sequences.shape[ 0 ] # after deduplication, this number can be less than n_docs*n_beam # then, run model forwards to get nll scores: if input_ids is not None: new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1)) outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True) else: # input_ids is None, need context_input_ids/mask and doc_scores assert context_attention_mask is not None, ( "Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you" " can set a retriever using the `set_retriever(...)` function." ) assert doc_scores is not None, ( "Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a" " retriever using the `set_retriever(...)` function." 
) individual_input_ids = tf.tile( generator_input_ids, (num_candidates, 1) ) # (num_candidates*n_docs, max_len) individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs] individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1)) individual_doc_scores = doc_scores[index : (index + 1), :] # doc_scores.shape = [batch, n_docs] individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1)) # [num_candidates, n_docs] outputs = self( input_ids=None, context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True, ) top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1] # add hypothesis hypos.append(tf.gather(output_sequences, top_cand_inds)) return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id) @staticmethod def _cat_and_pad(tensors, pad_token_id): # used by generate(): tensors is a (batched) list of (candidates, len); len is varied across batch # Initialize padded tensor with shape ( all_candidates , max_candidate_length ), # where all_candidates counted from all inputs new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors]) output = tf.fill(new_shape, pad_token_id) # Normal tensor doesn't support slice assignment, so we need tf.Variable output = tf.Variable(output) # Assign, and then convert back to tensor ind = 0 for t in tensors: output[ind : ind + t.shape[0], : t.shape[1]].assign(t) ind += t.shape[0] output = tf.convert_to_tensor(output) return tf.cast(output, tensors[0][0][0].dtype) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "rag", None) is not None: with tf.name_scope(self.rag.name): self.rag.build(None) __all__ = ["TFRagModel", "TFRagPreTrainedModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration"]
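
# --- Added sketch (not part of the original module): RAG-token marginalization in isolation. ---
# It mirrors `TFRagTokenForGeneration.marginalize` above:
#   log p(y_t | x) = logsumexp_d [ log p(d | x) + log p(y_t | x, d) ]
# Shapes and values below are hypothetical and chosen only for illustration.
import tensorflow as tf


def rag_token_marginalize_sketch(seq_logits: tf.Tensor, doc_scores: tf.Tensor, n_docs: int) -> tf.Tensor:
    # seq_logits: (batch_size * n_docs, tgt_len, vocab_size); doc_scores: (batch_size, n_docs)
    seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
    seq_logprobs = tf.reshape(seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]))
    doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)[:, :, None, None]  # (batch_size, n_docs, 1, 1)
    # add the document log-probabilities to the per-document token log-probabilities,
    # then marginalize (logsumexp) over the document axis
    return tf.reduce_logsumexp(seq_logprobs + doc_logprobs, axis=1)


# e.g. rag_token_marginalize_sketch(tf.random.normal((2 * 5, 7, 50)), tf.random.normal((2, 5)), n_docs=5)
# returns a tensor of shape (2, 7, 50) holding the marginalized token log-probabilities.
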
transformers/src/transformers/models/rag/modeling_tf_rag.py/0
{ "file_path": "transformers/src/transformers/models/rag/modeling_tf_rag.py", "repo_id": "transformers", "token_count": 38435 }
544
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RegNet checkpoints from timm and vissl.""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Optional import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf from huggingface_hub import hf_hub_download from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger() @dataclass class Tracker: module: nn.Module traced: list[nn.Module] = field(default_factory=list) handles: list = field(default_factory=list) def _forward_hook(self, m, inputs: Tensor, outputs: Tensor): has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d)) if has_not_submodules: self.traced.append(m) def __call__(self, x: Tensor): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(x) [x.remove() for x in self.handles] return self @property def parametrized(self): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced)) @dataclass class ModuleTransfer: src: nn.Module dest: nn.Module verbose: int = 1 src_skip: list = field(default_factory=list) dest_skip: list = field(default_factory=list) raise_if_mismatch: bool = True def __call__(self, x: Tensor): """ Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the hood we tracked all the operations in both modules. """ dest_traced = Tracker(self.dest)(x).parametrized src_traced = Tracker(self.src)(x).parametrized src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced)) dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced)) if len(dest_traced) != len(src_traced) and self.raise_if_mismatch: raise Exception( f"Numbers of operations are different. Source module has {len(src_traced)} operations while" f" destination module has {len(dest_traced)}." ) for dest_m, src_m in zip(dest_traced, src_traced): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"Transferred from={src_m} to={dest_m}") class FakeRegNetVisslWrapper(nn.Module): """ Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file. 
""" def __init__(self, model: nn.Module): super().__init__() feature_blocks: list[tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block"), f"Unexpected layer name {k}" block_index = len(feature_blocks) + 1 feature_blocks.append((f"res{block_index}", v)) self._feature_blocks = nn.ModuleDict(feature_blocks) def forward(self, x: Tensor): return get_trunk_forward_outputs( x, out_feat_keys=None, feature_blocks=self._feature_blocks, ) class NameToFromModelFuncMap(dict): """ A Dictionary with some additional logic to return a function that creates the correct original model. """ def convert_name_to_timm(self, x: str) -> str: x_split = x.split("-") return x_split[0] + x_split[1] + "_" + "".join(x_split[2:]) def __getitem__(self, x: str) -> Callable[[], tuple[nn.Module, dict]]: # default to timm! if x not in self: x = self.convert_name_to_timm(x) val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None)) else: val = super().__getitem__(x) return val class NameToOurModelFuncMap(dict): """ A Dictionary with some additional logic to return the correct hugging face RegNet class reference. """ def __getitem__(self, x: str) -> Callable[[], nn.Module]: if "seer" in x and "in1k" not in x: val = RegNetModel else: val = RegNetForImageClassification return val def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: list[tuple[str, str]]): for from_key, to_key in keys: to_state_dict[to_key] = from_state_dict[from_key].clone() print(f"Copied key={from_key} to={to_key}") return to_state_dict def convert_weight_and_push( name: str, from_model_func: Callable[[], nn.Module], our_model_func: Callable[[], nn.Module], config: RegNetConfig, save_directory: Path, push_to_hub: bool = True, ): print(f"Converting {name}...") with torch.no_grad(): from_model, from_state_dict = from_model_func() our_model = our_model_func(config).eval() module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False) x = torch.randn((1, 3, 224, 224)) module_transfer(x) if from_state_dict is not None: keys = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys) our_model.load_state_dict(to_state_dict) our_outputs = our_model(x, output_hidden_states=True) our_output = ( our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state ) from_output = from_model(x) from_output = from_output[-1] if isinstance(from_output, list) else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: our_output = our_outputs.hidden_states[-1] assert torch.allclose(from_output, our_output), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, ) size = 224 if "seer" not in name else 384 # we can use the convnext one image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, ) print(f"Pushed {name}") def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True): filename = "imagenet-1k-id2label.json" num_labels = 1000 expected_shape = (1, num_labels) repo_id = "huggingface/label-files" num_labels = num_labels id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label label2id = {v: k for k, v in id2label.items()} ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id) names_to_config = { "regnet-x-002": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x" ), "regnet-x-004": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x" ), "regnet-x-006": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x" ), "regnet-x-008": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x" ), "regnet-x-016": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x" ), "regnet-x-032": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x" ), "regnet-x-040": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x" ), "regnet-x-064": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x" ), "regnet-x-080": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x" ), "regnet-x-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x" ), "regnet-x-160": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x" ), "regnet-x-320": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x" ), # y variant "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8), "regnet-y-004": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ), "regnet-y-006": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ), "regnet-y-008": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ), "regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 
2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), # models created by SEER -> https://huggingface.co/papers/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } names_to_ours_model_map = NameToOurModelFuncMap() names_to_from_model_map = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> tuple[nn.Module, dict]: files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu") model = model_func() # check if we have a head, if yes add it model_state_dict = files["classy_state_dict"]["base_model"]["model"] state_dict = model_state_dict["trunk"] model.load_state_dict(state_dict) return model.eval(), model_state_dict["heads"] # pretrained names_to_from_model_map["regnet-y-320-seer"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), ) names_to_from_model_map["regnet-y-640-seer"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), ) names_to_from_model_map["regnet-y-1280-seer"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), ) names_to_from_model_map["regnet-y-10b-seer"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, 
w_0=1744, w_a=620.83, w_m=2.52)) ), ) # IN1K finetuned names_to_from_model_map["regnet-y-320-seer-in1k"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), ) names_to_from_model_map["regnet-y-640-seer-in1k"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), ) names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), ) names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial( load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52)) ), ) if model_name: convert_weight_and_push( model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, ) return config, expected_shape if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) args = parser.parse_args() pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
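
# Added usage sketch (not part of the original script); `regnet-y-040` is just one example key
# from `names_to_config` above:
#
#     python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted-regnet
#
# Note that `--push_to_hub` is declared with `type=bool`, so argparse converts any non-empty
# string (including "False") to True; pass an empty string (--push_to_hub "") or change the
# default if you want a purely local conversion.
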
transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/regnet/convert_regnet_to_pytorch.py", "repo_id": "transformers", "token_count": 8465 }
545
# coding=utf-8 # Copyright 2022 Microsoft Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow ResNet model.""" from typing import Optional, Union import tensorflow as tf from ...activations_tf import ACT2FN from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFImageClassifierOutputWithNoAttention, ) from ...modeling_tf_utils import ( TFPreTrainedModel, TFSequenceClassificationLoss, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_resnet import ResNetConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "ResNetConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/resnet-50" _EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50" _IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat" class TFResNetConvLayer(keras.layers.Layer): def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu", **kwargs, ) -> None: super().__init__(**kwargs) self.pad_value = kernel_size // 2 self.conv = keras.layers.Conv2D( out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else keras.layers.Activation("linear") self.in_channels = in_channels self.out_channels = out_channels def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor: # Pad to match that done in the PyTorch Conv2D model height_pad = width_pad = (self.pad_value, self.pad_value) hidden_state = tf.pad(hidden_state, [(0, 0), height_pad, width_pad, (0, 0)]) hidden_state = self.conv(hidden_state) return hidden_state def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, training=training) hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv", None) is not None: with tf.name_scope(self.conv.name): self.conv.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFResNetEmbeddings(keras.layers.Layer): """ ResNet Embeddings (stem) composed of a single aggressive convolution. 
""" def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) self.embedder = TFResNetConvLayer( config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act, name="embedder", ) self.pooler = keras.layers.MaxPool2D(pool_size=3, strides=2, padding="valid", name="pooler") self.num_channels = config.num_channels def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor: _, _, _, num_channels = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) hidden_state = pixel_values hidden_state = self.embedder(hidden_state) hidden_state = tf.pad(hidden_state, [[0, 0], [1, 1], [1, 1], [0, 0]]) hidden_state = self.pooler(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFResNetShortCut(keras.layers.Layer): """ ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to downsample the input using `stride=2`. """ def __init__(self, in_channels: int, out_channels: int, stride: int = 2, **kwargs) -> None: super().__init__(**kwargs) self.convolution = keras.layers.Conv2D( out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent self.normalization = keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.in_channels = in_channels self.out_channels = out_channels def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_state = x hidden_state = self.convolution(hidden_state) hidden_state = self.normalization(hidden_state, training=training) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convolution", None) is not None: with tf.name_scope(self.convolution.name): self.convolution.build([None, None, None, self.in_channels]) if getattr(self, "normalization", None) is not None: with tf.name_scope(self.normalization.name): self.normalization.build([None, None, None, self.out_channels]) class TFResNetBasicLayer(keras.layers.Layer): """ A classic ResNet's residual layer composed by two `3x3` convolutions. 
""" def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", **kwargs ) -> None: super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 self.conv1 = TFResNetConvLayer(in_channels, out_channels, stride=stride, name="layer.0") self.conv2 = TFResNetConvLayer(out_channels, out_channels, activation=None, name="layer.1") self.shortcut = ( TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.activation = ACT2FN[activation] def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: residual = hidden_state hidden_state = self.conv1(hidden_state, training=training) hidden_state = self.conv2(hidden_state, training=training) residual = self.shortcut(residual, training=training) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build(None) if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) class TFResNetBottleNeckLayer(keras.layers.Layer): """ A classic ResNet's bottleneck layer composed by three `3x3` convolutions. The first `1x1` convolution reduces the input by a factor of `reduction` in order to make the second `3x3` convolution faster. The last `1x1` convolution remaps the reduced features to `out_channels`. """ def __init__( self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4, **kwargs, ) -> None: super().__init__(**kwargs) should_apply_shortcut = in_channels != out_channels or stride != 1 reduces_channels = out_channels // reduction self.conv0 = TFResNetConvLayer(in_channels, reduces_channels, kernel_size=1, name="layer.0") self.conv1 = TFResNetConvLayer(reduces_channels, reduces_channels, stride=stride, name="layer.1") self.conv2 = TFResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None, name="layer.2") self.shortcut = ( TFResNetShortCut(in_channels, out_channels, stride=stride, name="shortcut") if should_apply_shortcut else keras.layers.Activation("linear", name="shortcut") ) self.activation = ACT2FN[activation] def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: residual = hidden_state hidden_state = self.conv0(hidden_state, training=training) hidden_state = self.conv1(hidden_state, training=training) hidden_state = self.conv2(hidden_state, training=training) residual = self.shortcut(residual, training=training) hidden_state += residual hidden_state = self.activation(hidden_state) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv0", None) is not None: with tf.name_scope(self.conv0.name): self.conv0.build(None) if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(None) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build(None) if getattr(self, "shortcut", None) is not None: with tf.name_scope(self.shortcut.name): self.shortcut.build(None) class TFResNetStage(keras.layers.Layer): """ A ResNet stage composed of stacked layers. 
""" def __init__( self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs ) -> None: super().__init__(**kwargs) layer = TFResNetBottleNeckLayer if config.layer_type == "bottleneck" else TFResNetBasicLayer layers = [layer(in_channels, out_channels, stride=stride, activation=config.hidden_act, name="layers.0")] layers += [ layer(out_channels, out_channels, activation=config.hidden_act, name=f"layers.{i + 1}") for i in range(depth - 1) ] self.stage_layers = layers def call(self, hidden_state: tf.Tensor, training: bool = False) -> tf.Tensor: for layer in self.stage_layers: hidden_state = layer(hidden_state, training=training) return hidden_state def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "stage_layers", None) is not None: for layer in self.stage_layers: with tf.name_scope(layer.name): layer.build(None) class TFResNetEncoder(keras.layers.Layer): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages = [ TFResNetStage( config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) ] for i, (in_channels, out_channels, depth) in enumerate( zip(config.hidden_sizes, config.hidden_sizes[1:], config.depths[1:]) ): self.stages.append(TFResNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i + 1}")) def call( self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True, training: bool = False, ) -> TFBaseModelOutputWithNoAttention: hidden_states = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: hidden_states = hidden_states + (hidden_state,) hidden_state = stage_module(hidden_state, training=training) if output_hidden_states: hidden_states = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "stages", None) is not None: for layer in self.stages: with tf.name_scope(layer.name): layer.build(None) class TFResNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ResNetConfig base_model_prefix = "resnet" main_input_name = "pixel_values" @property def input_signature(self): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)} RESNET_START_DOCSTRING = r""" This model is a TensorFlow [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ RESNET_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @keras_serializable class TFResNetMainLayer(keras.layers.Layer): config_class = ResNetConfig def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(**kwargs) self.config = config self.embedder = TFResNetEmbeddings(config, name="embedder") self.encoder = TFResNetEncoder(config, name="encoder") self.pooler = keras.layers.GlobalAveragePooling2D(keepdims=True) @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 image layers can't use NCHW format when running on CPU. # We transpose to NHWC format and then transpose back after the full forward pass. # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) embedding_output = self.embedder(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Transpose all the outputs to the NCHW format # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width) last_hidden_state = tf.transpose(last_hidden_state, (0, 3, 1, 2)) pooled_output = tf.transpose(pooled_output, (0, 3, 1, 2)) hidden_states = () for hidden_state in encoder_outputs[1:]: hidden_states = hidden_states + tuple(tf.transpose(h, (0, 3, 1, 2)) for h in hidden_state) if not return_dict: return (last_hidden_state, pooled_output) + hidden_states hidden_states = hidden_states if output_hidden_states else None return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embedder", None) is not None: with tf.name_scope(self.embedder.name): self.embedder.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) @add_start_docstrings( "The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, ) class TFResNetModel(TFResNetPreTrainedModel): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.resnet = TFResNetMainLayer(config=config, name="resnet") @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) @unpack_inputs def call( self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: 
bool = False, ) -> Union[tuple[tf.Tensor], TFBaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict resnet_outputs = self.resnet( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return resnet_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "resnet", None) is not None: with tf.name_scope(self.resnet.name): self.resnet.build(None) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, RESNET_START_DOCSTRING, ) class TFResNetForImageClassification(TFResNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: ResNetConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.num_labels = config.num_labels self.resnet = TFResNetMainLayer(config, name="resnet") # classification head self.classifier_layer = ( keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else keras.layers.Activation("linear", name="classifier.1") ) self.config = config def classifier(self, x: tf.Tensor) -> tf.Tensor: x = keras.layers.Flatten()(x) logits = self.classifier_layer(x) return logits @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) @unpack_inputs def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple[tf.Tensor], TFImageClassifierOutputWithNoAttention]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.resnet( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training ) pooled_output = outputs.pooler_output if return_dict else outputs[1] logits = self.classifier(pooled_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "resnet", None) is not None: with tf.name_scope(self.resnet.name): self.resnet.build(None) if getattr(self, "classifier_layer", None) is not None: with tf.name_scope(self.classifier_layer.name): self.classifier_layer.build([None, None, self.config.hidden_sizes[-1]]) __all__ = ["TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel"]
transformers/src/transformers/models/resnet/modeling_tf_resnet.py/0
{ "file_path": "transformers/src/transformers/models/resnet/modeling_tf_resnet.py", "repo_id": "transformers", "token_count": 10035 }
546
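The TF ResNet classification model in the file above attaches a Keras `Dense` head to the pooled features. A minimal usage sketch follows; the `microsoft/resnet-50` checkpoint name and the COCO image URL are assumptions chosen only for illustration and are not taken from the file itself.

```python
# Hedged usage sketch for TFResNetForImageClassification (checkpoint and image URL are assumptions).
import tensorflow as tf
import requests
from PIL import Image

from transformers import AutoImageProcessor, TFResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed example image
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # assumed checkpoint
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

# The processor returns channels-first pixel values, matching the model's expected NCHW input.
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)

predicted_class_idx = int(tf.math.argmax(outputs.logits, axis=-1)[0])
print(model.config.id2label[predicted_class_idx])
```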
# coding=utf-8 # Copyright 2022 WeChatAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """RoCBert model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class RoCBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a RoCBert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RoCBert [weiweishi/roc-bert-base-zh](https://huggingface.co/weiweishi/roc-bert-base-zh) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RoCBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`RoCBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. 
position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. enable_pronunciation (`bool`, *optional*, defaults to `True`): Whether or not the model use pronunciation embed when training. enable_shape (`bool`, *optional*, defaults to `True`): Whether or not the model use shape embed when training. pronunciation_embed_dim (`int`, *optional*, defaults to 768): Dimension of the pronunciation_embed. pronunciation_vocab_size (`int`, *optional*, defaults to 910): Pronunciation Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `input_pronunciation_ids` passed when calling [`RoCBertModel`]. shape_embed_dim (`int`, *optional*, defaults to 512): Dimension of the shape_embed. shape_vocab_size (`int`, *optional*, defaults to 24858): Shape Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the `input_shape_ids` passed when calling [`RoCBertModel`]. concat_input (`bool`, *optional*, defaults to `True`): Defines the way of merging the shape_embed, pronunciation_embed and word_embed, if the value is true, output_embed = torch.cat((word_embed, shape_embed, pronunciation_embed), -1), else output_embed = (word_embed + shape_embed + pronunciation_embed) / 3 Example: ```python >>> from transformers import RoCBertModel, RoCBertConfig >>> # Initializing a RoCBert weiweishi/roc-bert-base-zh style configuration >>> configuration = RoCBertConfig() >>> # Initializing a model from the weiweishi/roc-bert-base-zh style configuration >>> model = RoCBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "roc_bert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.enable_pronunciation = enable_pronunciation self.enable_shape = enable_shape self.pronunciation_embed_dim = pronunciation_embed_dim 
self.pronunciation_vocab_size = pronunciation_vocab_size self.shape_embed_dim = shape_embed_dim self.shape_vocab_size = shape_vocab_size self.concat_input = concat_input self.position_embedding_type = position_embedding_type self.classifier_dropout = classifier_dropout super().__init__(pad_token_id=pad_token_id, **kwargs) __all__ = ["RoCBertConfig"]
transformers/src/transformers/models/roc_bert/configuration_roc_bert.py/0
{ "file_path": "transformers/src/transformers/models/roc_bert/configuration_roc_bert.py", "repo_id": "transformers", "token_count": 3162 }
547
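The `concat_input` description in the configuration above defines two fusion modes for the word, shape, and pronunciation embeddings. The sketch below only illustrates those two formulas with random tensors; it is not the model's internal code, and the matching widths in the averaging branch are an assumption made so the shapes line up.

```python
# Illustrative sketch (not RoCBertModel internals) of the two `concat_input` fusion modes.
import torch

batch, seq = 1, 4
word_embed = torch.randn(batch, seq, 768)           # hidden_size default
shape_embed = torch.randn(batch, seq, 512)           # shape_embed_dim default
pronunciation_embed = torch.randn(batch, seq, 768)   # pronunciation_embed_dim default

# concat_input=True: embeddings are concatenated along the last dimension (768 + 512 + 768 = 2048).
output_embed = torch.cat((word_embed, shape_embed, pronunciation_embed), dim=-1)
print(output_embed.shape)  # torch.Size([1, 4, 2048])

# concat_input=False: embeddings are averaged; this requires a shared width, so a 768-dim
# shape embedding is used here purely for illustration.
shape_embed_768 = torch.randn(batch, seq, 768)
output_embed = (word_embed + shape_embed_768 + pronunciation_embed) / 3
print(output_embed.shape)  # torch.Size([1, 4, 768])
```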
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert SAM checkpoints from the original repository. URL: https://github.com/facebookresearch/segment-anything. Also supports converting the SlimSAM checkpoints from https://github.com/czg1225/SlimSAM/tree/master. """ import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) def get_config(model_name): if "slimsam-50" in model_name: vision_config = SamVisionConfig( hidden_size=384, mlp_dim=1536, num_hidden_layers=12, num_attention_heads=12, global_attn_indexes=[2, 5, 8, 11], ) elif "slimsam-77" in model_name: vision_config = SamVisionConfig( hidden_size=168, mlp_dim=696, num_hidden_layers=12, num_attention_heads=12, global_attn_indexes=[2, 5, 8, 11], ) elif "sam_vit_b" in model_name: vision_config = SamVisionConfig() elif "sam_vit_l" in model_name: vision_config = SamVisionConfig( hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) elif "sam_vit_h" in model_name: vision_config = SamVisionConfig( hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) config = SamConfig( vision_config=vision_config, ) return config KEYS_TO_MODIFY_MAPPING = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def replace_keys(state_dict): model_state_dict = {} state_dict.pop("pixel_mean", None) state_dict.pop("pixel_std", None) output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) if re.match(output_hypernetworks_mlps_pattern, key): layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2)) if layer_nb == 0: key = key.replace("layers.0", "proj_in") elif layer_nb == 1: key = key.replace("layers.1", 
"layers.0") elif layer_nb == 2: key = key.replace("layers.2", "proj_out") model_state_dict[key] = value model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def convert_sam_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub): config = get_config(model_name) state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True) state_dict = replace_keys(state_dict) image_processor = SamImageProcessor() processor = SamProcessor(image_processor=image_processor) hf_model = SamModel(config) hf_model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" hf_model.load_state_dict(state_dict) hf_model = hf_model.to(device) img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") input_points = [[[500, 375]]] input_labels = [[1]] inputs = processor(images=np.array(raw_image), return_tensors="pt").to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() if model_name == "sam_vit_b_01ec64": inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() elif model_name == "sam_vit_h_4b8939": inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 input_boxes = ((75, 275, 1725, 850),) inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. 
input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): output = hf_model(**inputs) scores = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if pytorch_dump_folder is not None: processor.save_pretrained(pytorch_dump_folder) hf_model.save_pretrained(pytorch_dump_folder) if push_to_hub: repo_id = f"nielsr/{model_name}" if "slimsam" in model_name else f"meta/{model_name}" processor.push_to_hub(repo_id) hf_model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195", "slimsam-50-uniform", "slimsam-77-uniform"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Name of the original model to convert", ) parser.add_argument( "--checkpoint_path", type=str, required=False, help="Path to the original checkpoint", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) args = parser.parse_args() if "slimsam" in args.model_name: checkpoint_path = args.checkpoint_path if checkpoint_path is None: raise ValueError("You need to provide a checkpoint path for SlimSAM models.") else: checkpoint_path = hf_hub_download("ybelkada/segment-anything", f"checkpoints/{args.model_name}.pth") convert_sam_checkpoint(args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/sam/convert_sam_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/sam/convert_sam_to_hf.py", "repo_id": "transformers", "token_count": 3759 }
548
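The conversion script above renames original SAM state-dict keys via `KEYS_TO_MODIFY_MAPPING` plus a regex pass for the output hypernetwork MLPs. Below is a trimmed-down sketch of that renaming applied to two hypothetical keys; the mapping here is only a subset of the full one in the script.

```python
# Trimmed-down sketch of the key renaming performed by `replace_keys` (subset of the full mapping).
import re

SUBSET_MAPPING = {
    "image_encoder": "vision_encoder",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}

def rename(key: str) -> str:
    for old, new in SUBSET_MAPPING.items():
        if old in key:
            key = key.replace(old, new)
    # Hypernetwork MLP sublayers 0 / 1 / 2 are remapped to proj_in / layers.0 / proj_out.
    match = re.match(r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*", key)
    if match:
        layer_nb = int(match.group(2))
        if layer_nb == 0:
            key = key.replace("layers.0", "proj_in")
        elif layer_nb == 1:
            key = key.replace("layers.1", "layers.0")
        elif layer_nb == 2:
            key = key.replace("layers.2", "proj_out")
    return key

print(rename("image_encoder.blocks.0.norm1.weight"))
# -> vision_encoder.layers.0.layer_norm1.weight
print(rename("mask_decoder.output_hypernetworks_mlps.3.layers.2.weight"))
# -> mask_decoder.output_hypernetworks_mlps.3.proj_out.weight
```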
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/sam2_video/modular_sam2_video.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_sam2_video.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections import OrderedDict from dataclasses import dataclass from typing import Any, Callable, Iterator, Optional, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from tqdm import tqdm from transformers.utils.generic import OutputRecorder, TransformersKwargs from ...activations import ACT2FN from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ( ModelOutput, auto_docstring, ) from ..auto import AutoModel from .configuration_sam2_video import Sam2VideoConfig, Sam2VideoMaskDecoderConfig, Sam2VideoPromptEncoderConfig class Sam2VideoInferenceCache: """Cache for vision features and model constants.""" def __init__( self, inference_device: Union[torch.device, str] = "cpu", inference_state_device: Union[torch.device, str] = "cpu", max_vision_features_cache_size: int = 1, ): self.inference_device = inference_device self.inference_state_device = inference_state_device self.max_vision_features_cache_size = max_vision_features_cache_size self._vision_features = {} def cache_vision_features(self, frame_idx: int, features: dict): """Cache vision features with automatic device management.""" cached = {} if len(self._vision_features) >= self.max_vision_features_cache_size: # remove the oldest frame self._vision_features.pop(min(self._vision_features.keys())) for key, value in features.items(): if isinstance(value, torch.Tensor): cached[key] = value.to(self.inference_state_device, non_blocking=True) elif isinstance(value, (list, tuple)) and value and isinstance(value[0], torch.Tensor): cached[key] = [v.to(self.inference_state_device, non_blocking=True) for v in value] else: cached[key] = value self._vision_features[frame_idx] = cached def get_vision_features(self, frame_idx: int) -> Optional[dict]: """Get cached vision features, automatically moved to inference device.""" if frame_idx not in self._vision_features: return None cached = self._vision_features[frame_idx] moved = {} for key, value in cached.items(): if isinstance(value, torch.Tensor): moved[key] = value.to(self.inference_device, non_blocking=True) elif isinstance(value, 
(list, tuple)) and value and isinstance(value[0], torch.Tensor): moved[key] = [v.to(self.inference_device, non_blocking=True) for v in value] else: moved[key] = value return moved def clear_all(self): """Clear all cached data.""" self._vision_features.clear() class Sam2VideoInferenceSession: r""" Manages video inference session parameters, state and cache. Args: video (`torch.FloatTensor`, *optional*): The video to process. No need to provide when streaming. video_height (`int`, *optional*): The height of the video. video_width (`int`, *optional*): The width of the video. inference_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to use for inference. inference_state_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to store the inference state on. video_storage_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to store the video on. dtype (`torch.dtype`, *optional*, defaults to `"float32"`): The dtype to use for the video. max_vision_features_cache_size (`int`, *optional*, defaults to 1): The maximum number of vision features to cache. """ def __init__( self, video: torch.FloatTensor = None, video_height: Optional[int] = None, video_width: Optional[int] = None, inference_device: Union[torch.device, str] = "cpu", inference_state_device: Union[torch.device, str] = "cpu", video_storage_device: Union[torch.device, str] = "cpu", dtype: Union[torch.dtype, str] = "float32", max_vision_features_cache_size: int = 1, ): # store as a list to avoid double memory allocation with torch.cat when adding new frames self.processed_frames = list(video.to(video_storage_device, dtype=dtype)) if video is not None else None self.video_height = video_height self.video_width = video_width self.inference_device = inference_device self.inference_state_device = inference_state_device self.video_storage_device = video_storage_device self.dtype = dtype self.max_vision_features_cache_size = max_vision_features_cache_size # Cache for computed features self.cache = Sam2VideoInferenceCache( inference_device=self.inference_device, inference_state_device=self.inference_state_device, max_vision_features_cache_size=self.max_vision_features_cache_size, ) # Persistent object tracking state self._obj_id_to_idx = OrderedDict() self._obj_idx_to_id = OrderedDict() self.obj_ids = [] # Persistent user inputs self.point_inputs_per_obj = {} self.mask_inputs_per_obj = {} # Persistent model outputs/history self.output_dict_per_obj = {} self.frames_tracked_per_obj = {} # Session state flags self.obj_with_new_inputs = [] @property def num_frames(self) -> Optional[int]: return len(self.processed_frames) if self.processed_frames is not None else None # Object management def obj_id_to_idx(self, obj_id: int) -> int: """Map object ID to index, creating new entry if needed.""" obj_idx = self._obj_id_to_idx.get(obj_id, None) if obj_idx is not None: return obj_idx obj_idx = len(self._obj_id_to_idx) self._obj_id_to_idx[obj_id] = obj_idx self._obj_idx_to_id[obj_idx] = obj_id self.obj_ids = list(self._obj_id_to_idx) self.point_inputs_per_obj[obj_idx] = {} self.mask_inputs_per_obj[obj_idx] = {} self.output_dict_per_obj[obj_idx] = { "cond_frame_outputs": {}, "non_cond_frame_outputs": {}, } self.frames_tracked_per_obj[obj_idx] = {} return obj_idx # Video Inference specific functions def obj_idx_to_id(self, obj_idx: int) -> int: """Map model-side object index to client-side object id.""" return self._obj_idx_to_id[obj_idx] def get_obj_num(self) -> int: """Get the total number of unique object ids 
received so far in this session.""" return len(self._obj_idx_to_id) # Input management with device handling def add_point_inputs(self, obj_idx: int, frame_idx: int, inputs: dict): """Add point inputs with automatic device placement.""" device_inputs = {} for key, value in inputs.items(): if isinstance(value, torch.Tensor): device_inputs[key] = value.to(self.inference_device, non_blocking=True) else: device_inputs[key] = value self.point_inputs_per_obj[obj_idx][frame_idx] = device_inputs def remove_point_inputs(self, obj_idx: int, frame_idx: int): """Remove point inputs.""" self.point_inputs_per_obj[obj_idx].pop(frame_idx, None) def add_mask_inputs(self, obj_idx: int, frame_idx: int, inputs: torch.Tensor): """Add mask inputs with automatic device placement.""" self.mask_inputs_per_obj[obj_idx][frame_idx] = inputs.to( self.inference_device, dtype=self.dtype, non_blocking=True ) def remove_mask_inputs(self, obj_idx: int, frame_idx: int): """Remove mask inputs.""" self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None) # Output management with smart device placement def store_output( self, obj_idx: int, frame_idx: int, output_key: Optional[str] = None, output_value: Optional[Union[torch.Tensor, dict]] = None, is_conditioning_frame: bool = True, ): """ Store output with smart device management. If output_key is None, the output is stored as a dictionary. Args: obj_idx (int): The index of the object. frame_idx (int): The index of the frame. output_key (Optional[str]): The key of the output. If None, the output is stored as a dictionary. output_value (Optional[Union[torch.Tensor, dict]]): The value of the output. is_conditioning_frame (bool): Whether the output is for a conditioning frame. """ storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs" if output_key is None and isinstance(output_value, dict): self.output_dict_per_obj[obj_idx][storage_key][frame_idx] = {} for key, value in output_value.items(): self.store_output(obj_idx, frame_idx, key, value, is_conditioning_frame) return # Device placement: small tensors stay on inference device, large ones go to inference state device if output_key in ["object_pointer", "object_score_logits"]: # Small tensors self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value elif isinstance(output_value, torch.Tensor): # Large tensors like masks, features self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value.to( self.inference_state_device, non_blocking=True ) else: self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value def get_output( self, obj_idx: int, frame_idx: int, output_key: str, is_conditioning_frame: bool = True, ): """ Get output with smart device management. Args: obj_idx (int): The index of the object. frame_idx (int): The index of the frame. output_key (str): The key of the output. is_conditioning_frame (bool): Whether the output is for a conditioning frame. 
""" storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs" out = self.output_dict_per_obj[obj_idx][storage_key].get(frame_idx, None) # move to inference device if needed if out is None: return None value = out[output_key] if isinstance(value, torch.Tensor): value = value.to(self.inference_device, non_blocking=True) return value # Video frame management def add_new_frame(self, pixel_values: torch.Tensor) -> int: """Add new frame with automatic device placement.""" pixel_values = pixel_values.to(self.video_storage_device, dtype=self.dtype, non_blocking=True) if pixel_values.dim() == 4: pixel_values = pixel_values.squeeze(0) if self.processed_frames is None: self.processed_frames = [pixel_values] else: self.processed_frames.append(pixel_values) return self.num_frames - 1 def get_frame(self, frame_idx: int) -> torch.Tensor: """Get frame from video.""" return self.processed_frames[frame_idx].to(self.inference_device, non_blocking=True) def reset_tracking_data(self): """Reset tracking data but keep cache.""" self._obj_id_to_idx.clear() self._obj_idx_to_id.clear() self.obj_ids.clear() self.point_inputs_per_obj.clear() self.mask_inputs_per_obj.clear() self.output_dict_per_obj.clear() self.frames_tracked_per_obj.clear() self.obj_with_new_inputs = [] # Note: cache and video data are preserved def reset_inference_session(self): """Reset tracking data and cache.""" self._obj_id_to_idx.clear() self._obj_idx_to_id.clear() self.obj_ids.clear() self.point_inputs_per_obj.clear() self.mask_inputs_per_obj.clear() self.output_dict_per_obj.clear() self.frames_tracked_per_obj.clear() self.obj_with_new_inputs = [] self.cache.clear_all() class Sam2VideoLayerNorm(nn.Module): r"""LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_first"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape)) self.bias = nn.Parameter(torch.zeros(normalized_shape)) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise NotImplementedError(f"Unsupported data format: {self.data_format}") self.normalized_shape = (normalized_shape,) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": input_dtype = x.dtype x = x.float() u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = x.to(dtype=input_dtype) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x # copied and adapted from original implementation, also practically equal to DetrSinePositionEmbedding class Sam2VideoPositionEmbeddingSine(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__( self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None ): super().__init__() if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize self.scale = 2 * math.pi if scale is None else scale @compile_compatible_method_lru_cache(maxsize=1) def forward( self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor] = None, ) -> Tensor: if mask is None: mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool) not_mask = (~mask).to(dtype) y_embed = not_mask.cumsum(1) x_embed = not_mask.cumsum(2) if self.normalize: eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Sam2VideoAttention(nn.Module): """ SAM2_VIDEO's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and values. 
""" def __init__(self, config, downsample_rate=None): super().__init__() downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.config = config self.hidden_size = config.hidden_size self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads self.head_dim = self.internal_dim // config.num_attention_heads self.scaling = self.head_dim**-0.5 self.is_causal = False self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) self.o_proj = nn.Linear(self.internal_dim, self.hidden_size) def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_similarity: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: # Input projections batch_size, point_batch_size = query.shape[:2] new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim) query = self.q_proj(query).view(*new_shape).transpose(1, 2) key = self.k_proj(key).view(*new_shape).transpose(1, 2) value = self.v_proj(value).view(*new_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask=attention_similarity, dropout=0.0 if not self.training else self.dropout_p, scaling=self.scaling, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape( batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim ).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Sam2VideoTwoWayAttentionBlock(nn.Module): def __init__(self, config: Sam2VideoMaskDecoderConfig, skip_first_layer_pe: bool = False): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`Sam2VideoMaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. 
""" super().__init__() self.self_attn = Sam2VideoAttention(config, downsample_rate=1) self.layer_norm1 = nn.LayerNorm(config.hidden_size) self.cross_attn_token_to_image = Sam2VideoAttention(config) self.layer_norm2 = nn.LayerNorm(config.hidden_size) self.mlp = Sam2VideoFeedForward( config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers ) self.layer_norm3 = nn.LayerNorm(config.hidden_size) self.layer_norm4 = nn.LayerNorm(config.hidden_size) self.cross_attn_image_to_token = Sam2VideoAttention(config) self.skip_first_layer_pe = skip_first_layer_pe def forward( self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs], ): # Self attention block if self.skip_first_layer_pe: queries, _ = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out, _ = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_token_to_image( query=query, key=key, value=keys, attention_similarity=attention_similarity ) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) return queries, keys, attn_out class Sam2VideoFeedForward(nn.Module): def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, activation: str = "relu", sigmoid_output: bool = False, ): super().__init__() self.num_layers = num_layers self.activation = ACT2FN[activation] self.proj_in = nn.Linear(input_dim, hidden_dim) self.proj_out = nn.Linear(hidden_dim, output_dim) self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) self.sigmoid_output = sigmoid_output def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = F.sigmoid(hidden_states) return hidden_states @dataclass @auto_docstring(custom_intro="Base class for the Sam2Video model's output.") class Sam2VideoImageSegmentationOutput(ModelOutput): r""" iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`): The Intersection over Union (IoU) scores of the predicted masks. pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`): The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed by the processor to be brought to the original image size. object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`): Logits for the object score, indicating if an object is present. image_embeddings (`tuple(torch.FloatTensor)`): The features from the FPN, which are used by the mask decoder. 
This is a tuple of `torch.FloatTensor` where each tensor has shape `(batch_size, channels, height, width)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the vision model at the output of each stage. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the vision model. mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the mask decoder. high_res_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, image_size, image_size)`, *optional*): The predicted masks, upscaled to the original image size. Only used for Sam2VideoModel. object_pointer (`torch.FloatTensor` of shape `(batch_size, point_batch_size, hidden_size)`, *optional*): A tensor representing the object pointer, used for tracking in videos. Only used for Sam2VideoModel. """ iou_scores: torch.FloatTensor = None pred_masks: torch.FloatTensor = None object_score_logits: torch.FloatTensor = None image_embeddings: tuple[torch.FloatTensor, ...] = None vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None high_res_masks: torch.FloatTensor = None object_pointer: torch.FloatTensor = None @dataclass @auto_docstring(custom_intro="Base class for the Sam2 model's output.") class Sam2VideoSegmentationOutput(ModelOutput): r""" pred_masks (`torch.FloatTensor` of shape `(batch_size, num_masks, height, width)`): The predicted masks stored at the model's resolution. frame_idx (`int`): The frame index of the video. 
""" pred_masks: torch.FloatTensor = None frame_idx: int = None @auto_docstring class Sam2VideoPreTrainedModel(PreTrainedModel): config_class = Sam2VideoConfig base_model_prefix = "sam2_video" main_input_name = "pixel_values" _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, Sam2VideoLayerNorm)): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, Sam2VideoModel): if module.no_memory_positional_encoding is not None: module.no_memory_positional_encoding.data.zero_() if module.memory_temporal_positional_encoding is not None: module.memory_temporal_positional_encoding.data.zero_() if module.no_object_pointer is not None: module.no_object_pointer.data.zero_() if module.occlusion_spatial_embedding_parameter is not None: module.occlusion_spatial_embedding_parameter.data.zero_() if isinstance(module, Sam2VideoMemoryFuserCXBlock): if module.scale is not None: module.scale.data.zero_() class Sam2VideoVisionRotaryEmbedding(nn.Module): """ Vision Rotary Position Embedding for SAM2, following transformers library standards. Supports 2D (axial) rotary embeddings for spatial dimensions. """ def __init__(self, config: Sam2VideoConfig): super().__init__() dim = config.memory_attention_hidden_size // ( config.memory_attention_downsample_rate * config.memory_attention_num_attention_heads ) # Ensure even dimension for proper axial splitting if dim % 4 != 0: raise ValueError("Dimension must be divisible by 4 for axial RoPE") end_x, end_y = config.memory_attention_rope_feat_sizes freqs = 1.0 / (config.memory_attention_rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim)) # Generate 2D position indices for axial rotary embedding flattened_indices = torch.arange(end_x * end_y, dtype=torch.long) x_positions = flattened_indices % end_x y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor") freqs_x = torch.outer(x_positions, freqs).float() freqs_y = torch.outer(y_positions, freqs).float() inv_freq = torch.cat([freqs_x, freqs_y], dim=-1) inv_freq = inv_freq.repeat_interleave(2, dim=-1) # directly register the cos and sin embeddings as we have a fixed feature shape self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False) self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False) @torch.no_grad() def forward(self) -> tuple[torch.Tensor, torch.Tensor]: # As the feature map size is fixed, we can just return the pre-computed embeddings. return self.rope_embeddings_cos, self.rope_embeddings_sin def rotate_pairwise(x): """ pairwise rotation of the hidden dims of the input. Differerent from Llama Half-Tensor Rotation. 
This is an optimized version of the following more explicit implementation: ```python x_rotated = torch.zeros_like(x, dtype=x.dtype, device=x.device) x_rotated[..., ::2] = -x[..., 1::2] x_rotated[..., 1::2] = x[..., ::2] return x_rotated ``` """ x = x.view(*x.shape[:-1], -1, 2) x1, x2 = x.unbind(dim=-1) x = torch.stack((-x2, x1), dim=-1) return x.flatten(start_dim=-2) # TODO: This leads to ~1e-07 max diff and ~1e-09 avg diff for q_embed and k_embed from the original implementation, most likely due to the use of complex tensors in the original implementation. def apply_rotary_pos_emb_2d( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, num_k_exclude_rope: int = 0, repeat_freqs_k: bool = False, ) -> tuple[torch.Tensor, torch.Tensor]: """ Apply rotary position embedding to query and key tensors for vision models. Follows the standard transformers library pattern. Args: q: Query tensor of shape (..., seq_len, head_dim) k: Key tensor of shape (..., seq_len, head_dim) cos: Cosine position embedding of shape (seq_len, head_dim) sin: Sine position embedding of shape (seq_len, head_dim) repeat_freqs_k: Whether to repeat frequencies for keys (for cross-attention) Returns: Rotated (q, k) tensors """ k_rot, k_pass = k[..., : k.shape[-2] - num_k_exclude_rope, :], k[..., k.shape[-2] - num_k_exclude_rope :, :] q_embed = q.float() # force upscale to float32 as in the original implementation q_embed = (q_embed * cos) + (rotate_pairwise(q_embed) * sin) if k_rot.shape[-2] == 0: # Handle case where keys might be empty due to dropout return q_embed.type_as(q), torch.cat([k_rot, k_pass], dim=-2) # Handle key tensor - may need to repeat frequencies if different sequence length if repeat_freqs_k and k_rot.shape[-2] != q.shape[-2]: # Repeat cos/sin to match key sequence length repeat_factor = k_rot.shape[-2] // q.shape[-2] cos_k = cos.repeat(1, 1, repeat_factor, 1) sin_k = sin.repeat(1, 1, repeat_factor, 1) else: cos_k = cos sin_k = sin # Apply rotary embedding to keys k_embed = k_rot.float() # force upscale to float32 as in the original implementation k_embed = (k_embed * cos_k) + (rotate_pairwise(k_embed) * sin_k) # Concatenate back to full shape k_embed = torch.cat([k_embed.type_as(k), k_pass], dim=-2) return q_embed.type_as(q), k_embed class Sam2VideoRoPEAttention(nn.Module): """Attention with rotary position encoding.""" def __init__( self, config: Sam2VideoConfig, kv_in_dim: Optional[int] = None, rope_k_repeat=False, ): super().__init__() self.config = config self.hidden_size = config.memory_attention_hidden_size self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate self.num_attention_heads = config.memory_attention_num_attention_heads self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads self.scaling = self.head_dim**-0.5 self.is_causal = False self.kv_in_dim = kv_in_dim if kv_in_dim is not None else self.hidden_size self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) self.o_proj = nn.Linear(self.internal_dim, self.hidden_size) self.rope_k_repeat = rope_k_repeat self.dropout_p = config.memory_attention_rope_dropout def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], num_k_exclude_rope: int = 0, **kwargs: Unpack[FlashAttentionKwargs], ) -> Tensor: # Input projections batch_size, point_batch_size = query.shape[:2] new_shape = 
(batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim) query = self.q_proj(query).view(*new_shape).transpose(1, 2) key = self.k_proj(key).view(*new_shape).transpose(1, 2) value = self.v_proj(value).view(*new_shape).transpose(1, 2) cos, sin = position_embeddings # Apply rotary position encoding, excluding some keys if specified query, key = apply_rotary_pos_emb_2d( query, key, cos, sin, repeat_freqs_k=self.rope_k_repeat, num_k_exclude_rope=num_k_exclude_rope ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask=None, dropout=0.0 if not self.training else self.dropout_p, scaling=self.scaling, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape( batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim ).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Sam2VideoMemoryAttentionLayer(nn.Module): def __init__(self, config: Sam2VideoConfig): super().__init__() hidden_size = config.memory_attention_hidden_size self.self_attn = Sam2VideoRoPEAttention(config) self.cross_attn_image = Sam2VideoRoPEAttention(config, kv_in_dim=64, rope_k_repeat=True) # Implementation of Feedforward model self.linear1 = nn.Linear(hidden_size, config.memory_attention_feed_forward_hidden_size) self.dropout = nn.Dropout(config.memory_attention_dropout) self.linear2 = nn.Linear(config.memory_attention_feed_forward_hidden_size, hidden_size) self.layer_norm1 = nn.LayerNorm(hidden_size) self.layer_norm2 = nn.LayerNorm(hidden_size) self.layer_norm3 = nn.LayerNorm(hidden_size) self.dropout1 = nn.Dropout(config.memory_attention_dropout) self.dropout2 = nn.Dropout(config.memory_attention_dropout) self.dropout3 = nn.Dropout(config.memory_attention_dropout) self.activation = ACT2FN[config.memory_attention_feed_forward_hidden_act] def forward( self, queries: Tensor, keys: Tensor, key_point_embedding: Tensor, rope_position_embeddings: tuple[Tensor, Tensor], num_k_exclude_rope: int = 0, ) -> torch.Tensor: # Self-Attention query = self.layer_norm1(queries) query, _ = self.self_attn(query=query, key=query, value=query, position_embeddings=rope_position_embeddings) queries = queries + self.dropout1(query) # Cross-Attention query = self.layer_norm2(queries) query, _ = self.cross_attn_image( query=query, key=keys + key_point_embedding, value=keys, position_embeddings=rope_position_embeddings, num_k_exclude_rope=num_k_exclude_rope, ) queries = queries + self.dropout2(query) # MLP query = self.layer_norm3(queries) query = self.linear2(self.dropout(self.activation(self.linear1(query)))) queries = queries + self.dropout3(query) return queries class Sam2VideoMemoryAttention(nn.Module): def __init__(self, config: Sam2VideoConfig): super().__init__() self.layers = nn.ModuleList( [Sam2VideoMemoryAttentionLayer(config) for _ in range(config.memory_attention_num_layers)] ) self.layer_norm = nn.LayerNorm(config.memory_attention_hidden_size) self.rotary_emb = Sam2VideoVisionRotaryEmbedding(config=config) def forward( self, current_vision_features: torch.Tensor, memory: torch.Tensor, current_vision_position_embeddings: Optional[Tensor] = None, memory_posision_embeddings: Optional[Tensor] = None, num_object_pointer_tokens: int = 0, ): """ Args: current_vision_features (`torch.FloatTensor`): The current vision features used for 
self-attention. memory (`torch.FloatTensor`): The memory features used for cross-attention. current_vision_position_embeddings (`torch.FloatTensor`, *optional*): The position embeddings for the current vision features. memory_posision_embeddings (`torch.FloatTensor`, *optional*): The position embeddings for the memory features. num_object_pointer_tokens (`int`, *optional*, defaults to 0): The number of object pointer tokens. """ output = current_vision_features if current_vision_position_embeddings is not None: output = output + 0.1 * current_vision_position_embeddings # Convert to batch first output = output.transpose(0, 1) memory = memory.transpose(0, 1).unsqueeze(1) memory_posision_embeddings = memory_posision_embeddings.transpose(0, 1).unsqueeze(1) rope_position_embeddings = self.rotary_emb() for layer in self.layers: output = layer( queries=output.unsqueeze(1) if output.ndim == 3 else output, keys=memory, key_point_embedding=memory_posision_embeddings, rope_position_embeddings=rope_position_embeddings, num_k_exclude_rope=num_object_pointer_tokens, ) normed_output = self.layer_norm(output) # Convert back to seq first normed_output = normed_output.transpose(0, 1) return normed_output # Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt) class Sam2VideoMemoryFuserCXBlock(GradientCheckpointingLayer): def __init__(self, config: Sam2VideoConfig): super().__init__() self.depthwise_conv = nn.Conv2d( config.memory_fuser_embed_dim, config.memory_fuser_embed_dim, kernel_size=config.memory_fuser_kernel_size, padding=config.memory_fuser_padding, groups=config.memory_fuser_embed_dim, ) # depthwise conv self.layer_norm = Sam2VideoLayerNorm(config.memory_fuser_embed_dim, eps=1e-6) self.activation = ACT2FN[config.memory_fuser_hidden_act] self.pointwise_conv1 = nn.Linear( config.memory_fuser_embed_dim, config.memory_fuser_intermediate_dim ) # pointwise/1x1 convs, implemented with linear layers self.pointwise_conv2 = nn.Linear(config.memory_fuser_intermediate_dim, config.memory_fuser_embed_dim) self.scale = nn.Parameter( config.memory_fuser_layer_scale_init_value * torch.ones((config.memory_fuser_embed_dim)), requires_grad=True, ) def forward(self, hidden_states): input = hidden_states hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) hidden_states = self.pointwise_conv1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.pointwise_conv2(hidden_states) hidden_states = self.scale * hidden_states hidden_states = hidden_states.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) hidden_states = input + hidden_states return hidden_states class Sam2VideoMemoryFuser(nn.Module): def __init__(self, config: Sam2VideoConfig): super().__init__() self.layers = nn.ModuleList( [Sam2VideoMemoryFuserCXBlock(config) for _ in range(config.memory_fuser_num_layers)] ) def forward(self, hidden_states): # normally hidden_states: (N, C, H, W) for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class Sam2VideoMaskDownSamplerLayer(nn.Module): def __init__(self, config: Sam2VideoConfig, in_channels: int, out_channels: int): super().__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=config.mask_downsampler_kernel_size, stride=config.mask_downsampler_stride, padding=config.mask_downsampler_padding, ) self.layer_norm = Sam2VideoLayerNorm(out_channels, eps=1e-6) self.activation = 
ACT2FN[config.mask_downsampler_hidden_act] def forward(self, x): return self.activation(self.layer_norm(self.conv(x))) class Sam2VideoMaskDownSampler(nn.Module): """ Progressively downsample a mask by total_stride, each time by stride. Note that LayerNorm is applied per *token*, like in ViT. With each downsample (by a factor stride**2), channel capacity increases by the same factor. In the end, we linearly project to embed_dim channels. """ def __init__(self, config: Sam2VideoConfig): super().__init__() num_layers = int(math.log2(config.mask_downsampler_total_stride) // math.log2(config.mask_downsampler_stride)) self.layers = nn.ModuleList() self.activation = ACT2FN[config.mask_downsampler_hidden_act] mask_in_chans, mask_out_chans = 1, 1 for _ in range(num_layers): mask_out_chans = mask_in_chans * (config.mask_downsampler_stride**2) self.layers.append(Sam2VideoMaskDownSamplerLayer(config, mask_in_chans, mask_out_chans)) mask_in_chans = mask_out_chans self.final_conv = nn.Conv2d(mask_out_chans, config.mask_downsampler_embed_dim, kernel_size=1) def forward(self, x): for layer in self.layers: x = layer(x) x = self.final_conv(x) return x class Sam2VideoMemoryEncoder(nn.Module): def __init__(self, config: Sam2VideoConfig): super().__init__() hidden_size = config.memory_encoder_hidden_size output_channels = config.memory_encoder_output_channels self.mask_downsampler = Sam2VideoMaskDownSampler(config) self.feature_projection = nn.Conv2d(hidden_size, hidden_size, kernel_size=1) self.memory_fuser = Sam2VideoMemoryFuser(config) self.position_encoding = Sam2VideoPositionEmbeddingSine(num_pos_feats=output_channels // 2, normalize=True) self.projection = nn.Conv2d(hidden_size, output_channels, kernel_size=1) def forward( self, vision_features: torch.Tensor, masks: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: ## Process masks masks = self.mask_downsampler(masks) ## Fuse pixel_features and downsampled masks vision_features = self.feature_projection(vision_features) vision_features = vision_features + masks vision_features = self.memory_fuser(vision_features) vision_features = self.projection(vision_features) vision_pos_enc = self.position_encoding(vision_features.shape, vision_features.device, vision_features.dtype) return vision_features, vision_pos_enc @dataclass @auto_docstring(custom_intro="Base class for the vision encoder's outputs.") class Sam2VideoVisionEncoderOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. fpn_hidden_states (`tuple(torch.FloatTensor)`): Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape `(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck. fpn_position_encoding (`tuple(torch.FloatTensor)`): Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape `(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the model at the output of each stage. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor = None fpn_hidden_states: Optional[torch.FloatTensor] = None fpn_position_encoding: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None class Sam2VideoPositionalEmbedding(nn.Module): def __init__(self, config: Sam2VideoPromptEncoderConfig): super().__init__() self.scale = config.scale positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2)) self.register_buffer("positional_embedding", positional_embedding) def forward(self, input_coords, input_shape=None): """Positionally encode points that are normalized to [0,1].""" coordinates = input_coords.clone() if input_shape is not None: coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1] coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0] coordinates.to(torch.float32) # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape coordinates = 2 * coordinates - 1 coordinates = coordinates.to(self.positional_embedding.dtype) coordinates = coordinates @ self.positional_embedding coordinates = 2 * np.pi * coordinates # outputs d_1 x ... x d_n x channel shape return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1) class Sam2VideoMaskEmbedding(nn.Module): def __init__(self, config: Sam2VideoPromptEncoderConfig): super().__init__() self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2) self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2) self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1) self.layer_norm1 = Sam2VideoLayerNorm( self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first" ) self.layer_norm2 = Sam2VideoLayerNorm( self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first" ) def forward(self, masks): hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) return dense_embeddings class Sam2VideoPromptEncoder(nn.Module): def __init__(self, config: Sam2VideoPromptEncoderConfig): super().__init__() self.shared_embedding = Sam2VideoPositionalEmbedding(config) self.mask_embed = Sam2VideoMaskEmbedding(config) self.no_mask_embed = nn.Embedding(1, config.hidden_size) self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size) self.input_image_size = config.image_size self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size) self.hidden_size = config.hidden_size self.not_a_point_embed = nn.Embedding(1, config.hidden_size) def _embed_points(self, points: torch.Tensor, labels: 
torch.Tensor, pad: bool) -> torch.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0) labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) # torch.where and expanding the labels tensor is required by the ONNX export point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding) # This is required for the ONNX export. The dtype, device need to be explicitly # specified as otherwise torch.onnx.export interprets as double point_embedding = torch.where( labels[..., None] != -10, point_embedding, torch.zeros_like(point_embedding), ) # Add point embeddings for labels >= 0 point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1) return point_embedding def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: """Embeds box prompts.""" boxes = boxes + 0.5 # Shift to center of pixel batch_size, nb_boxes = boxes.shape[:2] coords = boxes.reshape(batch_size, nb_boxes, 2, 2) input_shape = (self.input_image_size, self.input_image_size) corner_embedding = self.shared_embedding(coords, input_shape) corner_embedding[:, :, 0, :] += self.point_embed.weight[2] corner_embedding[:, :, 1, :] += self.point_embed.weight[3] return corner_embedding def forward( self, input_points: Optional[tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor], ) -> tuple[torch.Tensor, torch.Tensor]: """ Embeds different types of prompts, returning both sparse and dense embeddings. Args: points (`torch.Tensor`, *optional*): point coordinates and labels to embed. 
boxes (`torch.Tensor`, *optional*): boxes to embed masks (`torch.Tensor`, *optional*): masks to embed """ sparse_embeddings = None batch_size = 1 if input_points is not None: batch_size = input_points.shape[0] if input_labels is None: raise ValueError("If points are provided, labels must also be provided.") point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None)) sparse_embeddings = point_embeddings if input_boxes is not None: batch_size = input_boxes.shape[0] box_embeddings = self._embed_boxes(input_boxes) if sparse_embeddings is None: sparse_embeddings = box_embeddings else: sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2) if input_masks is not None: dense_embeddings = self.mask_embed(input_masks) else: dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1] ) return sparse_embeddings, dense_embeddings class Sam2VideoTwoWayTransformer(nn.Module): def __init__(self, config: Sam2VideoMaskDecoderConfig): super().__init__() self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = nn.ModuleList() for i in range(self.num_hidden_layers): self.layers.append(Sam2VideoTwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0))) self.final_attn_token_to_image = Sam2VideoAttention(config) self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size) def forward( self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: if target_embedding is not None: queries += target_embedding queries, keys, _ = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, attention_similarity=attention_similarity, **kwargs, ) # Apply the final attention layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm_final_attn(queries) return queries, keys class Sam2VideoMaskDecoder(nn.Module): def __init__(self, config: Sam2VideoMaskDecoderConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_multimask_outputs = config.num_multimask_outputs self.num_mask_tokens = config.num_multimask_outputs + 1 self.iou_token = nn.Embedding(1, self.hidden_size) self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size) self.transformer = Sam2VideoTwoWayTransformer(config) # should we create a new class for this? 
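        # Upscaling path: two stride-2 transposed convolutions upsample the decoder's image embeddings by 4x
        # before the per-mask-token hypernetwork MLPs below turn mask tokens into mask predictions.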
self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2) self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2) self.upscale_layer_norm = Sam2VideoLayerNorm(self.hidden_size // 4, data_format="channels_first") self.activation = nn.GELU() mlps_list = [] for _ in range(self.num_mask_tokens): mlps_list += [Sam2VideoFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)] self.output_hypernetworks_mlps = nn.ModuleList(mlps_list) self.iou_prediction_head = Sam2VideoFeedForward( self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth, sigmoid_output=True, ) self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1) self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1) self.obj_score_token = nn.Embedding(1, self.hidden_size) self.pred_obj_score_head = Sam2VideoFeedForward(self.hidden_size, self.hidden_size, 1, 3) self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh def forward( self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, high_resolution_features: list[torch.Tensor], attention_similarity: Optional[torch.Tensor] = None, target_embedding: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Predict masks given image and prompt embeddings. Args: image_embeddings (`torch.Tensor`): The embeddings from the image encoder. image_positional_embeddings (`torch.Tensor`): Positional encoding with the shape of image_embeddings. sparse_prompt_embeddings (`torch.Tensor`): The embeddings of the points and boxes. dense_prompt_embeddings (`torch.Tensor`): The embeddings of the mask inputs. multimask_output (`bool`): Whether to return multiple masks or a single mask. high_resolution_features (`list[torch.Tensor]`, *optional*): The high-resolution features from the vision encoder. attention_similarity (`torch.Tensor`, *optional*): The attention similarity tensor. target_embedding (`torch.Tensor`, *optional*): The target embedding. 
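        Returns:
            `tuple(torch.Tensor)` of `(masks, iou_pred, sam_tokens_out, object_score_logits)`:
                - masks: the predicted (low-resolution) mask logits.
                - iou_pred: the predicted IoU / mask-quality scores for each mask.
                - sam_tokens_out: the selected mask output tokens (later projected into object pointers).
                - object_score_logits: logits scoring whether the object is present.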
""" batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] # Concatenate output tokens output_tokens = torch.cat( [ self.obj_score_token.weight, self.iou_token.weight, self.mask_tokens.weight, ], dim=0, ) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings.shape[0] != 0: tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) # Expand per-image data in batch direction to be per-mask image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) # Run the transformer point_embeddings, image_embeddings = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, **kwargs, ) iou_token_out = point_embeddings[:, :, 1, :] mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :] # Upscale mask embeddings and predict masks using the mask tokens image_embeddings = image_embeddings.transpose(2, 3).view( batch_size * point_batch_size, num_channels, height, width ) feat_s0, feat_s1 = high_resolution_features feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0) feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0) upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1 upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0) hyper_in_list: list[torch.Tensor] = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width) masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width) # Generate mask quality predictions iou_pred = self.iou_prediction_head(iou_token_out) object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :]) # Select the correct mask or masks for output if multimask_output: mask_slice = slice(1, None) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] elif self.dynamic_multimask_via_stability and not self.training: mask_slice = slice(0, 1) masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] sam_tokens_out = mask_tokens_out[:, :, mask_slice] # [b, 3, c] shape return masks, iou_pred, sam_tokens_out, object_score_logits def _get_stability_scores(self, mask_logits): """ Compute stability scores of the mask logits based on the IoU between upper and lower thresholds. 
""" mask_logits = mask_logits.flatten(-2) stability_delta = self.dynamic_multimask_stability_delta area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) return stability_scores def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): """ When outputting a single mask, if the stability score from the current single-mask output (based on output token 0) falls below a threshold, we instead select from multi-mask outputs (based on output token 1~3) the mask with the highest predicted IoU score. This is intended to ensure a valid mask for both clicking and tracking. """ # The best mask from multimask output tokens (1~3) multimask_logits = all_mask_logits[:, :, 1:, :, :] multimask_iou_scores = all_iou_scores[:, :, 1:] best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) # [B, P] best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) best_scores_inds_expanded = best_scores_inds_expanded.expand( -1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1) ) best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded) # [B, P, 1, H, W] best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1)) # [B, P, 1] # The mask from singlemask output token 0 and its stability score singlemask_logits = all_mask_logits[:, :, 0:1, :, :] singlemask_iou_scores = all_iou_scores[:, :, 0:1] stability_scores = self._get_stability_scores(singlemask_logits) is_stable = stability_scores >= self.dynamic_multimask_stability_thresh # Dynamically fall back to best multimask output upon low stability scores. mask_logits_out = torch.where( is_stable[..., None, None].expand_as(singlemask_logits), singlemask_logits, best_multimask_logits, ) iou_scores_out = torch.where( is_stable.expand_as(singlemask_iou_scores), singlemask_iou_scores, best_multimask_iou_scores, ) return mask_logits_out, iou_scores_out # a large negative value as a placeholder score for missing objects NO_OBJ_SCORE = -1024.0 def get_1d_sine_pe(pos_inds, dim, temperature=10000): """ Get 1D sine positional embedding as in the original Transformer paper. 
""" pe_dim = dim // 2 dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) pos_embed = pos_inds.unsqueeze(-1) / dim_t pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) return pos_embed @auto_docstring class Sam2VideoModel(Sam2VideoPreTrainedModel): _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"] # need to be ignored, as it's a buffer and will not be correctly detected as tied weight _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"] _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2VideoTwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [] def __init__(self, config: Sam2VideoConfig): super().__init__(config) self.shared_image_embedding = Sam2VideoPositionalEmbedding(config.prompt_encoder_config) self.vision_encoder = AutoModel.from_config(config.vision_config) self.prompt_encoder = Sam2VideoPromptEncoder(config.prompt_encoder_config) # The module using it is not a PreTrainedModel subclass so we need this config.mask_decoder_config._attn_implementation = config._attn_implementation self.mask_decoder = Sam2VideoMaskDecoder(config.mask_decoder_config) self.num_feature_levels = config.vision_config.num_feature_levels self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes # a single token to indicate no memory embedding from previous frames self.hidden_dim = config.vision_config.fpn_hidden_size self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) self.config = config # For video sequence inference self.image_size = config.image_size self.memory_attention = Sam2VideoMemoryAttention(config) self.memory_encoder = Sam2VideoMemoryEncoder(config) self.no_memory_positional_encoding = torch.nn.Parameter( torch.zeros(1, 1, config.vision_config.fpn_hidden_size) ) self.mem_dim = config.memory_encoder_output_channels self.num_maskmem = config.num_maskmem # Number of memories accessible # Temporal encoding of the memories self.memory_temporal_positional_encoding = torch.nn.Parameter( torch.zeros(self.num_maskmem, 1, 1, self.mem_dim) ) self.no_object_pointer = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) # A conv layer to downsample the mask prompt to stride 4 (the same stride as # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale, # so that it can be fed into the SAM mask decoder to generate a pointer. 
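        # (a single-channel 4x4 convolution with stride 4, i.e. a learned 4x spatial downsampling of the mask)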
self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) # a feedforward layer on SAM output tokens to turn them into object pointers self.object_pointer_proj = Sam2VideoFeedForward(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3) if self.config.enable_temporal_pos_encoding_for_object_pointers: # a linear projection on temporal positional encoding in object pointers to # avoid potential interference with spatial positional encoding self.temporal_positional_encoding_projection_layer = torch.nn.Linear(self.hidden_dim, self.mem_dim) else: self.temporal_positional_encoding_projection_layer = torch.nn.Identity() self.occlusion_spatial_embedding_parameter = None # compatibility with Sam2 if config.enable_occlusion_spatial_embedding: self.occlusion_spatial_embedding_parameter = torch.nn.Parameter(torch.zeros(1, self.mem_dim)) self.post_init() def _tie_weights(self): self.prompt_encoder.shared_embedding.positional_embedding.data = ( self.shared_image_embedding.positional_embedding.data ) def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self) -> torch.Tensor: size = self.prompt_encoder.image_embedding_size target_device = self.shared_image_embedding.positional_embedding.device target_dtype = self.shared_image_embedding.positional_embedding.dtype grid = torch.ones(size, device=target_device, dtype=target_dtype) y_embed = grid.cumsum(dim=0) - 0.5 x_embed = grid.cumsum(dim=1) - 0.5 y_embed = y_embed / size[0] x_embed = x_embed / size[1] positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1)) return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width @torch.no_grad() def get_image_embeddings( self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs], ) -> list[torch.Tensor]: r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values """ batch_size = pixel_values.shape[0] feature_maps, _, _, _ = self.get_image_features(pixel_values, **kwargs) # add no memory embedding to the last feature map feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding # reshape feature maps to the same shape as the backbone feature sizes image_embeddings = [ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes) ] return image_embeddings @torch.no_grad() def get_prompt_embeddings( self, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, torch.Tensor]: r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. Args: input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`): Optional input points for the prompt encoder. The padding of the point is automatically done by the processor. `point_batch_size` refers to the number of masks that we want the model to predict per point. The model will output `point_batch_size` times 3 masks in total. input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`): Optional input labels for the prompt encoder. The padding of the labels is automatically done by the processor, or can be fed by the user. 
input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`): Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the processor. users can also pass manually the input boxes. input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`): Optional input masks for the prompt encoder. """ prompt_output = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) return prompt_output @torch.inference_mode() @auto_docstring(custom_intro="Propagate the objects through a streamed video frame.") def forward( self, inference_session: Sam2VideoInferenceSession, frame_idx: Optional[int] = None, frame: Optional[torch.Tensor] = None, reverse: bool = False, ) -> Sam2VideoSegmentationOutput: r""" inference_session (`Sam2VideoInferenceSession`): The video inference session object. frame_idx (`int`, *optional*): The index of the frame on which to run inference. No need to provide when inferring on a new streamed frame. frame (`torch.Tensor`, *optional*): The frame to process. Provide when streaming. reverse (`bool`, *optional*, defaults to `False`): Whether to propagate in reverse. """ if frame is not None: frame_idx = inference_session.add_new_frame(frame) if frame is not None and inference_session.get_obj_num() == 0: raise ValueError("No objects are provided for tracking; please add inputs first.") num_objects = inference_session.get_obj_num() pred_masks_per_obj = [None] * num_objects # Note: We avoid batched inference here because per-object inputs (clicks/masks) # can differ across objects. for obj_idx in range(num_objects): obj_id = inference_session.obj_idx_to_id(obj_idx) has_new_inputs = obj_id in inference_session.obj_with_new_inputs has_cond_output = frame_idx in inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"] # If this object has no new inputs and this frame already has a # conditioning output, reuse the cached masks instead of recomputing. 
if (not has_new_inputs) and has_cond_output: pred_masks = inference_session.get_output(obj_idx, frame_idx, "pred_masks", is_conditioning_frame=True) is_init_cond_frame = True else: # Defaults when there are no new inputs is_init_cond_frame = False point_inputs = None mask_inputs = None if has_new_inputs: is_init_cond_frame = frame_idx not in inference_session.frames_tracked_per_obj[obj_idx] if is_init_cond_frame: reverse = False point_inputs = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None) mask_inputs = inference_session.mask_inputs_per_obj[obj_idx].get(frame_idx, None) if point_inputs is not None or mask_inputs is not None: inference_session.obj_with_new_inputs.remove(obj_id) current_out = self._run_single_frame_inference( inference_session=inference_session, obj_idx=obj_idx, frame_idx=frame_idx, batch_size=1, # run on the slice of a single object is_init_cond_frame=is_init_cond_frame, point_inputs=point_inputs, mask_inputs=mask_inputs, reverse=reverse, run_mem_encoder=True, streaming=frame is not None, ) inference_session.store_output( obj_idx, frame_idx, output_value=current_out, is_conditioning_frame=is_init_cond_frame ) pred_masks = current_out["pred_masks"] pred_masks_per_obj[obj_idx] = pred_masks if not is_init_cond_frame: # only for tracked frames, not for initial conditioning frames inference_session.frames_tracked_per_obj[obj_idx][frame_idx] = {"reverse": reverse} # Resize the output mask to the original video resolution (we directly use # the mask scores on GPU for output to avoid any CPU conversion in between) if len(pred_masks_per_obj) > 1: all_pred_masks = torch.cat(pred_masks_per_obj, dim=0) else: all_pred_masks = pred_masks_per_obj[0] return Sam2VideoSegmentationOutput(pred_masks=all_pred_masks, frame_idx=frame_idx) def get_image_features( self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs], ) -> tuple[ list[torch.Tensor], list[torch.Tensor], Optional[tuple[torch.FloatTensor, ...]], Optional[tuple[torch.FloatTensor, ...]], ]: r""" Extract and preprocess image features using the vision encoder. Args: pixel_values (`torch.FloatTensor`): Input pixel values of shape `(batch_size, num_channels, height, width)`. Returns: `tuple`: A tuple containing: - feature_maps (`list[torch.Tensor]`): List of feature maps from different levels. - feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each feature level. - vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision encoder. - vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision encoder. 
""" vision_outputs: Sam2VideoVisionEncoderOutput = self.vision_encoder( pixel_values, **kwargs, ) feature_maps = vision_outputs.fpn_hidden_states feature_maps_position_embeddings = vision_outputs.fpn_position_encoding # precompute projected level 0 and level 1 features in SAM decoder # to avoid running it again on every SAM click feature_maps = list(feature_maps) feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0]) feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1]) # flatten NxCxHxW to HWxNxC feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps] feature_maps_position_embeddings = [ feature_map_position_embedding.flatten(2).permute(2, 0, 1) for feature_map_position_embedding in feature_maps_position_embeddings ] return feature_maps, feature_maps_position_embeddings, vision_outputs.hidden_states, vision_outputs.attentions def _prepare_vision_features( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, batch_size: int, ) -> tuple[torch.Tensor, list[torch.Tensor]]: """Prepare vision features for a frame.""" # Check if features are cached if cached_features := inference_session.cache.get_vision_features(frame_idx): vision_feats = cached_features["vision_feats"] vision_pos_embeds = cached_features["vision_pos_embeds"] else: # Compute features using image encoder image_batch = inference_session.get_frame(frame_idx).unsqueeze(0) # Add batch dimension vision_feats, vision_pos_embeds, _, _ = self.get_image_features(image_batch) # Cache features inference_session.cache.cache_vision_features( frame_idx, {"vision_feats": vision_feats, "vision_pos_embeds": vision_pos_embeds} ) # Expand to batch size if needed if batch_size > 1: vision_feats = vision_feats.expand(batch_size, -1, -1, -1) vision_pos_embeds = [pe.expand(batch_size, -1, -1, -1) for pe in vision_pos_embeds] return vision_feats, vision_pos_embeds def _single_frame_forward( self, pixel_values: Optional[torch.FloatTensor] = None, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, image_embeddings: Optional[torch.FloatTensor] = None, multimask_output: bool = True, attention_similarity: Optional[torch.FloatTensor] = None, target_embedding: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Sam2VideoImageSegmentationOutput: """ input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`): Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much better results. The points can be obtained by passing a list of list of list to the processor that will create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict per input point), the third dimension is the number of points per segmentation mask (it is possible to pass multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal) coordinates of the point. If a different number of points is passed either for each image, or for each mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the computation of the embedding will be skipped for these points using the labels. 
        input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
            Input labels for the points, this is used by the prompt encoder to encode the prompt. According to the
            official implementation, there are 3 types of labels

            - `1`: the point is a point that contains the object of interest
            - `0`: the point is a point that does not contain the object of interest
            - `-1`: the point corresponds to the background

            We added the label:

            - `-10`: the point is a padding point, thus should be ignored by the prompt encoder

            The padding labels should be automatically done by the processor.
        input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields
            much better generated masks. The boxes can be obtained by passing a list of list of list to the
            processor, that will generate a `torch` tensor, with each dimension corresponding respectively to the
            image batch size, the number of boxes per image and the coordinates of the top left and bottom right
            point of the box. In the order (`x1`, `y1`, `x2`, `y2`):

            - `x1`: the x coordinate of the top left point of the input box
            - `y1`: the y coordinate of the top left point of the input box
            - `x2`: the x coordinate of the bottom right point of the input box
            - `y2`: the y coordinate of the bottom right point of the input box

        input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
            SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding, that will be fed later on to the mask decoder. These masks need to
            be manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
            Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
            method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
        multimask_output (`bool`, *optional*):
            In the original implementation and paper, the model always outputs 3 masks per image (or per point /
            per bounding box if relevant). However, it is possible to just output a single mask, that corresponds
            to the "best" mask, by specifying `multimask_output=False`.
        attention_similarity (`torch.FloatTensor`, *optional*):
            Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case
            the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
        target_embedding (`torch.FloatTensor`, *optional*):
            Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in
            case the model is used for personalization as introduced in
            [PerSAM](https://huggingface.co/papers/2305.03048).
        """
        if not ((pixel_values is None) ^ (image_embeddings is None)):
            raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")

        if input_points is not None and input_boxes is not None:
            if input_points.shape[1] != input_boxes.shape[1]:
                raise ValueError(
                    "You should provide as many bounding boxes as input points per box. 
Got {} and {}.".format( input_points.shape[1], input_boxes.shape[1] ) ) elif input_points is not None: num_objects = input_points.shape[1] elif input_boxes is not None: num_objects = input_boxes.shape[1] elif input_masks is not None: num_objects = input_masks.shape[1] else: num_objects = 1 image_positional_embeddings = self.get_image_wide_positional_embeddings() # repeat with batch size batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0] image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1) vision_attentions = None vision_hidden_states = None if pixel_values is not None: feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features( pixel_values, **kwargs, ) # add no memory embedding to the last feature map feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding # reshape feature maps to the same shape as the backbone feature sizes image_embeddings = [ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes) ] if input_points is not None and input_labels is None: input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device) if input_points is None and input_boxes is None: # If no points are provide, pad with an empty point (with label -1) input_points = torch.zeros( batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device ) input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device) if input_masks is not None: # If mask_inputs is provided, downsize it into low-res mask input if needed # and feed it as a dense mask prompt into the SAM mask encoder if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size: input_masks = F.interpolate( input_masks.float(), size=self.prompt_encoder.mask_input_size, align_corners=False, mode="bilinear", antialias=True, # use antialias for downsampling ).to(input_masks.dtype) sparse_embeddings, dense_embeddings = self.prompt_encoder( input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks, ) low_res_multimasks, iou_scores, sam_output_tokens, object_score_logits = self.mask_decoder( image_embeddings=image_embeddings[-1], image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, high_resolution_features=image_embeddings[:-1], attention_similarity=attention_similarity, target_embedding=target_embedding, **kwargs, ) is_obj_appearing = object_score_logits > 0 # Mask used for spatial memories is always a *hard* choice between obj and no obj, # consistent with the actual mask prediction low_res_multimasks = torch.where( is_obj_appearing[:, None, None], low_res_multimasks, NO_OBJ_SCORE, ) # convert masks from possibly bfloat16 (or float16) to float32 # (older PyTorch versions before 2.1 don't support `interpolate` on bf16) high_res_multimasks = ( F.interpolate( low_res_multimasks.squeeze(1).float(), size=(self.image_size, self.image_size), mode="bilinear", align_corners=False, ) .unsqueeze(1) .to(low_res_multimasks.dtype) ) sam_output_token = sam_output_tokens[:, :, 0] if multimask_output: # take the best mask prediction (with the highest IoU estimation) best_iou_inds = torch.argmax(iou_scores, dim=-1) batch_inds = torch.arange(batch_size, device=high_res_multimasks.device) object_batch_inds = 
torch.arange(num_objects, device=high_res_multimasks.device) low_res_masks = low_res_multimasks[batch_inds, object_batch_inds, best_iou_inds] high_res_masks = high_res_multimasks[batch_inds, object_batch_inds, best_iou_inds] if sam_output_tokens.size(2) > 1: sam_output_token = sam_output_tokens[batch_inds, object_batch_inds, best_iou_inds] else: low_res_masks, high_res_masks = low_res_multimasks[:, :, 0], high_res_multimasks[:, :, 0] # Extract object pointer from the SAM output token (with occlusion handling) object_pointer = self.object_pointer_proj(sam_output_token) lambda_is_obj_appearing = is_obj_appearing.to(object_pointer.dtype) object_pointer = lambda_is_obj_appearing * object_pointer object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer return Sam2VideoImageSegmentationOutput( iou_scores=iou_scores, pred_masks=low_res_masks, high_res_masks=high_res_masks, object_pointer=object_pointer, object_score_logits=object_score_logits, image_embeddings=image_embeddings, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, ) def _use_mask_as_output( self, backbone_features: torch.Tensor, high_res_features: list[torch.Tensor], mask_inputs: torch.Tensor, ) -> Sam2VideoImageSegmentationOutput: """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in forward above). """ # Use -10/+20 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid). out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05 mask_inputs_float = mask_inputs.to(backbone_features[0].dtype) high_res_masks = mask_inputs_float * out_scale + out_bias low_res_masks = F.interpolate( high_res_masks.float(), size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), align_corners=False, mode="bilinear", antialias=True, # use antialias for downsampling ).to(backbone_features[0].dtype) # a dummy IoU prediction of all 1's under mask input iou_scores = mask_inputs.new_ones(mask_inputs.size(0), 1).to(backbone_features[0].dtype) # produce an object pointer using the SAM decoder from the mask input object_pointer = self._single_frame_forward( input_masks=self.mask_downsample(mask_inputs_float.to(backbone_features[0].dtype)), image_embeddings=high_res_features + [backbone_features], ).object_pointer # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying # on the object_scores from the SAM decoder. 
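        # The object is considered visible on this frame if the provided mask contains any positive pixel.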
is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) is_obj_appearing = is_obj_appearing[..., None] lambda_is_obj_appearing = is_obj_appearing.to(backbone_features[0].dtype) object_score_logits = out_scale * lambda_is_obj_appearing + out_bias object_pointer = lambda_is_obj_appearing * object_pointer object_pointer = object_pointer + (1 - lambda_is_obj_appearing) * self.no_object_pointer return Sam2VideoImageSegmentationOutput( iou_scores=iou_scores, pred_masks=low_res_masks, high_res_masks=high_res_masks, object_pointer=object_pointer, object_score_logits=object_score_logits, image_embeddings=high_res_features + [backbone_features], ) def _prepare_memory_conditioned_features( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, obj_idx: int, is_initial_conditioning_frame: bool, current_vision_features: list[torch.Tensor], current_vision_positional_embeddings: list[torch.Tensor], num_total_frames: int, track_in_reverse_time: bool = False, streaming: bool = False, ) -> torch.Tensor: """ Fuse current frame's visual features with memory from previous frames for enhanced object tracking. This method conditions the current frame's visual features on temporal memory from previous frames, enabling consistent object tracking across video sequences. For initial conditioning frames, it uses no-memory embeddings. For subsequent frames, it retrieves and integrates memory features from both conditioning frames (user interactions) and non-conditioning frames (tracked results) via cross-attention. Args: inference_session (`Sam2VideoInferenceSession`): The video inference session object. frame_idx (`int`): Index of the current frame being processed. obj_idx (`int`): Index of the object being processed. is_initial_conditioning_frame (`bool`): Whether this is an initial conditioning frame with user inputs (True) or a subsequent tracking frame (False). current_vision_features (`torch.Tensor`): Highest-level vision features of shape `(seq_len, batch_size, channels)`. current_vision_positional_embeddings (`torch.Tensor`): Positional embedding tensors corresponding to the highest-level vision features. num_total_frames (`int`): Total number of frames in the video sequence. track_in_reverse_time (`bool`, *optional*, defaults to `False`): Whether tracking is performed in reverse temporal order. streaming (`bool`, *optional*, defaults to `False`): Whether this is streaming inference mode. Returns: `torch.Tensor`: Memory-conditioned feature tensor of shape `(batch_size, channels, height, width)` suitable for input to the SAM decoder. """ # Get dimensions from the highest-level (lowest-resolution) feature map batch_size = current_vision_features.size(1) num_channels = self.hidden_dim height, width = self.backbone_feature_sizes[-1] device = current_vision_features.device # If memory is disabled (e.g., for single image SAM), return current features directly. 
if self.num_maskmem == 0: # Permute (SeqLen, Batch, Channels) -> (Batch, Channels, SeqLen) then view as (Batch, Channels, Height, Width) # Assuming SeqLen = Height * Width for the last feature map current_feature_map = current_vision_features.permute(1, 2, 0).view( batch_size, num_channels, height, width ) return current_feature_map num_object_pointer_tokens = 0 temporal_position_sign_multiplier = -1 if track_in_reverse_time else 1 # Step 1: Condition the visual features of the current frame on previous memories if not is_initial_conditioning_frame: # Retrieve memories encoded from previous frames memories_to_concatenate = [] memory_positional_embeddings_to_concatenate = [] # Ensure there are conditioning frame outputs to process conditioning_outputs = inference_session.output_dict_per_obj[obj_idx]["cond_frame_outputs"] if not conditioning_outputs: raise ValueError( "maskmem_features in conditioning outputs cannot be empty when not is_initial_conditioning_frame" ) # Select a maximum number of temporally closest conditioning frames for cross-attention (no limit here, as is the case in the original checkpoints) # Store (temporal_position, output_data) tuples temporal_positions_and_previous_outputs = [(0, out) for out in conditioning_outputs.values()] # Add non-conditioning memory frames (up to self.num_maskmem - 1) # These are typically frames tracked by the model without direct user input. # Frames are selected with a stride, prioritizing the most recent ones. Here we only support stride = 1 for simplicity. for relative_temporal_offset in range(self.num_maskmem - 1, 0, -1): # relative_temporal_offset: how many frames before (or after if reversing) the current frame if not track_in_reverse_time: previous_frame_idx = frame_idx - relative_temporal_offset else: previous_frame_idx = frame_idx + relative_temporal_offset # check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU output_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get( previous_frame_idx, None ) temporal_positions_and_previous_outputs.append((relative_temporal_offset, output_data)) for relative_temporal_offset, prev_output_data in temporal_positions_and_previous_outputs: if prev_output_data is None: continue # Skip if no output data for this temporal position (e.g., padding frames) # Load memory features (potentially from CPU to GPU) # Features are flattened: (Batch, Channels, H, W) -> (H*W, Batch, Channels) memory_features = prev_output_data["maskmem_features"].to(device, non_blocking=True) memories_to_concatenate.append(memory_features) # Spatial positional encoding (potentially from CPU to GPU) spatial_memory_pos_embed = prev_output_data["maskmem_pos_enc"].to(device, non_blocking=True) # Add temporal positional encoding # self.memory_temporal_positional_encoding shape: (NumMaskMem, 1, 1, MemDim) combined_memory_pos_embed = ( spatial_memory_pos_embed + self.memory_temporal_positional_encoding[relative_temporal_offset - 1] ) memory_positional_embeddings_to_concatenate.append(combined_memory_pos_embed) # Construct the list of past object pointers to be used in attention if streaming: max_object_pointers_to_use = self.config.max_object_pointers_in_encoder else: max_object_pointers_to_use = min(num_total_frames, self.config.max_object_pointers_in_encoder) temporal_diff_and_pointers = [] # Add object pointers from selected conditioning frames # Optionally, only include pointers from past frames during evaluation 
eligible_conditioning_outputs = conditioning_outputs if not self.training: eligible_conditioning_outputs = { temporal_idx: out for temporal_idx, out in conditioning_outputs.items() if (temporal_idx >= frame_idx if track_in_reverse_time else temporal_idx <= frame_idx) } for temporal_idx, out_data in eligible_conditioning_outputs.items(): temporal_difference = (frame_idx - temporal_idx) * temporal_position_sign_multiplier temporal_diff_and_pointers.append((temporal_difference, out_data["object_pointer"])) # Add object pointers from non-conditioning frames (up to max_object_pointers_to_use - 1) for t_diff_offset in range(1, max_object_pointers_to_use): ref_frame_idx = frame_idx + t_diff_offset if track_in_reverse_time else frame_idx - t_diff_offset if ref_frame_idx < 0 or ( not streaming and num_total_frames is not None and ref_frame_idx >= num_total_frames ): break # Stop if frame index is out of bounds # check if the output is already stored without using get_output to avoid unnecessary memory transfers between CPU and GPU out_data = inference_session.output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].get( ref_frame_idx, None ) if out_data is not None: temporal_diff_and_pointers.append((t_diff_offset, out_data["object_pointer"])) if temporal_diff_and_pointers: temporal_differences, object_pointers_list = zip(*temporal_diff_and_pointers) # Stack object pointers: List of (Batch, Channels) -> (SeqLen_ptr, Batch, Channels) object_pointers = torch.stack(object_pointers_list, dim=0) if self.config.enable_temporal_pos_encoding_for_object_pointers: max_temporal_diff = float(max_object_pointers_to_use - 1) # Determine dimensionality for temporal positional encoding of pointers pointer_tpos_dim = num_channels # Normalize temporal differences before sine PE calculation normalized_temporal_diffs = ( torch.tensor(temporal_differences, device=device, dtype=torch.float32) / max_temporal_diff ) sine_pe = get_1d_sine_pe(normalized_temporal_diffs, dim=pointer_tpos_dim).to(object_pointers.dtype) projected_sine_pe = self.temporal_positional_encoding_projection_layer(sine_pe) object_pointers_pos_embed = projected_sine_pe.unsqueeze(1).expand(-1, batch_size, self.mem_dim) else: object_pointers_pos_embed = object_pointers.new_zeros( len(temporal_differences), batch_size, self.mem_dim, dtype=object_pointers.dtype ) if self.mem_dim < num_channels: # If memory dimension is smaller, reshape/split pointers and repeat positional encoding num_splits = num_channels // self.mem_dim object_pointers = object_pointers.reshape(-1, batch_size, num_splits, self.mem_dim) object_pointers = object_pointers.permute(0, 2, 1, 3).flatten( 0, 1 ) # (SeqLen_ptr*num_splits, Batch, MemDim) object_pointers_pos_embed = object_pointers_pos_embed.repeat_interleave(num_splits, dim=0) memories_to_concatenate.append(object_pointers) memory_positional_embeddings_to_concatenate.append(object_pointers_pos_embed) num_object_pointer_tokens = object_pointers.shape[0] else: # For initial conditioning frames, no prior memory is used directly in this block. # The model might handle this with a special token or mechanism. # If configured, directly add a learnable "no memory" embedding. 
# current_vision_features has shape (SeqLen, Batch, Channels) conditioned_feature_map_flat = current_vision_features + self.no_memory_embedding # Reshape to (Batch, Channels, Height, Width) conditioned_feature_map = conditioned_feature_map_flat.permute(1, 2, 0).view( batch_size, num_channels, height, width ) return conditioned_feature_map # Step 2: Concatenate all retrieved memories and their positional embeddings. combined_memory = torch.cat(memories_to_concatenate, dim=0) combined_memory_positional_embeddings = torch.cat(memory_positional_embeddings_to_concatenate, dim=0) # Step 3: Forward through the memory attention mechanism. conditioned_feature_map_flat = self.memory_attention( current_vision_features=current_vision_features, current_vision_position_embeddings=current_vision_positional_embeddings, memory=combined_memory, memory_posision_embeddings=combined_memory_positional_embeddings, # Corrected typo from API num_object_pointer_tokens=num_object_pointer_tokens, ) # Reshape from (Batch, H*W, Channels) to (Batch, Channels, Height, Width) conditioned_feature_map = ( conditioned_feature_map_flat.squeeze(1).permute(0, 2, 1).view(batch_size, num_channels, height, width) ) return conditioned_feature_map def _use_multimask(self, is_init_cond_frame: bool, point_inputs: Optional[dict]) -> bool: """Whether to use multimask output in the SAM head.""" num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(2) multimask_output = ( self.config.multimask_output_in_sam and (is_init_cond_frame or self.config.multimask_output_for_tracking) and (self.config.multimask_min_pt_num <= num_pts <= self.config.multimask_max_pt_num) ) return multimask_output def _run_single_frame_inference( self, inference_session: Sam2VideoInferenceSession, frame_idx: int, obj_idx: int, batch_size: int, is_init_cond_frame: bool, point_inputs: Optional[torch.Tensor], mask_inputs: Optional[torch.Tensor], reverse: bool, run_mem_encoder: bool, prev_sam_mask_logits: Optional[torch.Tensor] = None, streaming: bool = False, ) -> dict[str, Any]: """ Perform a single tracking step for video object segmentation. Args: inference_session (`Sam2VideoInferenceSession`): The video inference session object. frame_idx (`int`): Index of the current frame. obj_idx (`int`): Index of the current object. batch_size (`int`): Batch size of the current frame. is_init_cond_frame (`bool`): Whether this is an initial conditioning frame with user inputs. point_inputs (`dict`, *optional*): Point prompt inputs for the current frame. mask_inputs (`torch.Tensor`, *optional*): Mask prompt inputs for the current frame. reverse (`bool`, *optional*, defaults to `False`): Whether to track in reverse time order. run_mem_encoder (`bool`, *optional*, defaults to `True`): Whether to run the memory encoder on predicted masks. prev_sam_mask_logits (`torch.Tensor`, *optional*): Previously predicted SAM mask logits that can be fed with new clicks. streaming (`bool`, *optional*, defaults to `False`): Whether this is streaming inference. Returns: `dict`: Dictionary containing the tracking results for the current frame, including: - pred_masks: Predicted low-resolution masks. - object_pointer: Object pointer for memory. - object_score_logits: Object score logits (inference only). - maskmem_features: Memory features for future frames. - maskmem_pos_enc: Memory positional encodings. 
""" # Retrieve correct image features current_vision_feats, current_vision_pos_embeds = self._prepare_vision_features( inference_session, frame_idx, batch_size ) # point and mask should not appear as input simultaneously on the same frame if point_inputs is not None and mask_inputs is not None: raise ValueError( "point_inputs and mask_inputs should not appear as input simultaneously on the same frame" ) # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW if len(current_vision_feats) > 1: high_res_features = [ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) for x, s in zip(current_vision_feats[:-1], self.backbone_feature_sizes[:-1]) ] else: high_res_features = None if mask_inputs is not None: # We directly output the mask input (see it as a GT mask) without using a SAM prompt encoder + mask decoder. pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *self.backbone_feature_sizes[-1]) sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs) else: # fused the visual feature with previous memory features in the memory bank pix_feat = self._prepare_memory_conditioned_features( inference_session=inference_session, frame_idx=frame_idx, obj_idx=obj_idx, is_initial_conditioning_frame=is_init_cond_frame, current_vision_features=current_vision_feats[-1], current_vision_positional_embeddings=current_vision_pos_embeds[-1], num_total_frames=inference_session.num_frames, track_in_reverse_time=reverse, streaming=streaming, ) # apply SAM-style segmentation head # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, # e.g. in demo where such logits come from earlier interaction instead of correction sampling # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead) if prev_sam_mask_logits is not None: mask_inputs = prev_sam_mask_logits multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) sam_outputs = self._single_frame_forward( pixel_values=None, # Vision features already computed input_points=point_inputs["point_coords"] if point_inputs is not None else None, input_labels=point_inputs["point_labels"] if point_inputs is not None else None, input_masks=mask_inputs, image_embeddings=high_res_features + [pix_feat], multimask_output=multimask_output, ) # Finally run the memory encoder on the predicted mask to encode # it into a new memory feature (which will be used to condition vision features in future frames) maskmem_features = None maskmem_pos_enc = None if run_mem_encoder and self.num_maskmem > 0: maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats[-1], pred_masks_high_res=sam_outputs.high_res_masks, object_score_logits=sam_outputs.object_score_logits, is_mask_from_pts=(point_inputs is not None or mask_inputs is not None), ) current_out = { "pred_masks": sam_outputs.pred_masks, "object_pointer": sam_outputs.object_pointer, "maskmem_features": maskmem_features if maskmem_features is not None else None, "maskmem_pos_enc": maskmem_pos_enc, } if not self.training: current_out["object_score_logits"] = sam_outputs.object_score_logits return current_out def _encode_new_memory( self, current_vision_feats: torch.Tensor, pred_masks_high_res: torch.Tensor, object_score_logits: torch.Tensor, is_mask_from_pts: bool, ) -> tuple[torch.Tensor, list[torch.Tensor]]: """Encode the current image and its prediction into a memory feature.""" batch_size = current_vision_feats.size(1) 
# batch size on this frame channels = self.hidden_dim height, width = self.backbone_feature_sizes[-1] # top-level (lowest-resolution) feature size # top-level feature, (HW)BC => BCHW pix_feat = current_vision_feats.permute(1, 2, 0).view(batch_size, channels, height, width) if is_mask_from_pts and not self.training: # binarize the mask logits mask_for_mem = (pred_masks_high_res > 0).to(pred_masks_high_res.dtype) else: # apply sigmoid on the raw mask logits to turn them into range (0, 1) mask_for_mem = torch.sigmoid(pred_masks_high_res) # apply scale and bias terms to the sigmoid probabilities mask_for_mem = mask_for_mem * self.config.sigmoid_scale_for_mem_enc mask_for_mem = mask_for_mem + self.config.sigmoid_bias_for_mem_enc maskmem_features, maskmem_pos_enc = self.memory_encoder( pix_feat, mask_for_mem, ) # add a no-object embedding to the spatial memory to indicate that the frame # is predicted to be occluded (i.e. no object is appearing in the frame) if self.occlusion_spatial_embedding_parameter is not None: is_obj_appearing = (object_score_logits > 0).float() maskmem_features += (1 - is_obj_appearing[..., None]) * self.occlusion_spatial_embedding_parameter[ ..., None, None ].expand(*maskmem_features.shape) # convert to bfloat16 to save memory, and for consistency with the original implementation maskmem_features = maskmem_features.to(torch.bfloat16).flatten(2).permute(2, 0, 1) maskmem_pos_enc = maskmem_pos_enc.to(pred_masks_high_res.dtype).flatten(2).permute(2, 0, 1) return maskmem_features, maskmem_pos_enc @torch.inference_mode() @auto_docstring( custom_intro=""" Propagate the objects through the video frames. Used when initializing an inference session with a whole video. Yields Sam2VideoSegmentationOutput for each frame. """ ) def propagate_in_video_iterator( self, inference_session: Sam2VideoInferenceSession, start_frame_idx: Optional[int] = None, max_frame_num_to_track: Optional[int] = None, reverse: bool = False, ) -> Iterator[Sam2VideoSegmentationOutput]: r""" inference_session (`Sam2VideoInferenceSession`): The video inference session object. start_frame_idx (`int`, *optional*): The starting frame index for propagation. Need to be provided if `forward` hasn't been called on new inputs yet. If not provided, the starting frame index will be the earliest frame with input points. max_frame_num_to_track (`int`, *optional*): The maximum number of frames to track. reverse (`bool`, *optional*, defaults to `False`): Whether to propagate in reverse. """ num_frames = inference_session.num_frames # set start index, end index, and processing order if start_frame_idx is None: # default: start from the earliest frame with input points frames_with_inputs = [ frame_idx for obj_output_dict in inference_session.output_dict_per_obj.values() for frame_idx in obj_output_dict["cond_frame_outputs"] ] if not frames_with_inputs: raise ValueError( "Cannot determine the starting frame index; please specify it manually, or run inference on a frame with inputs first." 
) start_frame_idx = min(frames_with_inputs) if max_frame_num_to_track is None: # default: track all the frames in the video max_frame_num_to_track = num_frames if reverse: end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) if start_frame_idx > 0: processing_order = range(start_frame_idx, end_frame_idx - 1, -1) else: processing_order = [] # skip reverse tracking if starting from frame 0 else: end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1) processing_order = range(start_frame_idx, end_frame_idx + 1) for frame_idx in tqdm(processing_order, desc="propagate in video"): sam2_video_output = self(inference_session, frame_idx=frame_idx, reverse=reverse) yield sam2_video_output __all__ = ["Sam2VideoModel", "Sam2VideoInferenceSession", "Sam2VideoPreTrainedModel"]
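# --------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the library API): the frame
# ordering used by `propagate_in_video_iterator` above, restated as a standalone
# helper so the forward/reverse logic can be sanity-checked without building a
# model or an inference session. The helper name is hypothetical.
def _illustrate_propagation_order(start_frame_idx, max_frame_num_to_track, num_frames, reverse=False):
    if reverse:
        end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
        # reverse tracking is skipped when starting from frame 0, as in the method above
        return list(range(start_frame_idx, end_frame_idx - 1, -1)) if start_frame_idx > 0 else []
    end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1)
    return list(range(start_frame_idx, end_frame_idx + 1))


# For example, `_illustrate_propagation_order(5, 3, 10)` gives [5, 6, 7, 8], and
# `_illustrate_propagation_order(5, 3, 10, reverse=True)` gives [5, 4, 3, 2].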
transformers/src/transformers/models/sam2_video/modeling_sam2_video.py/0
{ "file_path": "transformers/src/transformers/models/sam2_video/modeling_sam2_video.py", "repo_id": "transformers", "token_count": 52863 }
549
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for SeamlessM4T.""" import os from shutil import copyfile from typing import Any, Optional, Union import sentencepiece as spm from ...convert_slow_tokenizer import import_protobuf from ...tokenization_utils import ( BatchEncoding, PreTokenizedInput, PreTrainedTokenizer, TextInput, ) from ...tokenization_utils_base import AddedToken from ...utils import PaddingStrategy, logging from ...utils.import_utils import requires logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} @requires(backends=("sentencepiece",)) class SeamlessM4TTokenizer(PreTrainedTokenizer): """ Construct a SeamlessM4T tokenizer. Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). The tokenization method is `<language code> <tokens> <eos>` for source language documents, and `<eos> <language code> <tokens> <eos>` for target language documents. Examples: ```python >>> from transformers import SeamlessM4TTokenizer >>> tokenizer = SeamlessM4TTokenizer.from_pretrained( ... "facebook/hf-seamless-m4t-medium", src_lang="eng", tgt_lang="fra" ... ) >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_french = "Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie." >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors="pt") ``` Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. tokenizer_file (`str`, *optional*): The path to a tokenizer file to use instead of the vocab file. src_lang (`str`, *optional*, defaults to `"eng"`): The language to use as source language for translation. tgt_lang (`str`, *optional*, defaults to `"fra"`): The language to use as target language for translation. sp_model_kwargs (`dict[str, Any]`, *optional*): Additional keyword arguments to pass to the model initialization. additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional special tokens. Can be used to specify the list of languages that will be supported by the tokenizer. add_prefix_space (`bool`, *optional*, defaults to `True`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: list[int] = [] suffix_tokens: list[int] = [] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", tokenizer_file=None, src_lang="eng", tgt_lang="fra", sp_model_kwargs: Optional[dict[str, Any]] = None, additional_special_tokens=None, add_prefix_space=True, **kwargs, ): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs # Add this unused argument to keep some important Copied from statements self.legacy = False self.vocab_file = vocab_file self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False)) # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # spm | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '_d' | 'er' | 'in' | '_s' | '_a' # fairseq | '<pad>' | '<unk>' | '<s>' | '</s>' | 'an' | 'en' | '▁d' | 'er' | 'in' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token self._added_tokens_decoder = { 0: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token, 1: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token, 2: AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token, 3: AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token, } # The first "real" token "an" has position 4 in the original fairseq vocab and position 3 in the spm vocab self.fairseq_offset = 1 self.sp_model_size = len(self.sp_model) self._src_lang = f"__{src_lang}__" if "__" not in src_lang else src_lang self._tgt_lang = f"__{tgt_lang}__" if "__" not in tgt_lang else tgt_lang self.add_prefix_space = add_prefix_space super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, add_prefix_space=add_prefix_space, **kwargs, ) self.set_src_lang_special_tokens(self._src_lang) self.set_tgt_lang_special_tokens(self._tgt_lang) # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.__getstate__ def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state # Copied from 
transformers.models.nllb.tokenization_nllb.NllbTokenizer.__setstate__ def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): return len(self.sp_model) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] ] = None, padding: Union[bool, str, PaddingStrategy] = True, pad_to_multiple_of: Optional[int] = 2, src_lang: Optional[str] = None, tgt_lang: Optional[str] = None, **kwargs, ): """ Args: text (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_target (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). src_lang (`str`, *optional*): A string representing the source language. If not specified, the last `src_lang` specified (either during initialization or when calling this tokenizer) will be used. tgt_lang (`str`, *optional*): A string representing the target language. 
If not specified, the last `tgt_lang` specified (either during initialization or when calling this tokenizer) will be used. kwargs (*optional*): Remaining dictionary of keyword arguments that will be passed to [`PreTrainedTokenizer.__call__`]. """ if src_lang is not None: self.src_lang = src_lang if tgt_lang is not None: self.tgt_lang = tgt_lang output = super().__call__( text=text, text_pair=text_pair, text_target=text_target, text_pair_target=text_pair_target, padding=padding, pad_to_multiple_of=pad_to_multiple_of, **kwargs, ) return BatchEncoding(output, tensor_type=kwargs.get("return_tensors")) @property # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.src_lang def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: if "__" not in new_src_lang: self._src_lang = f"__{new_src_lang}__" else: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) @property def tgt_lang(self) -> str: return self._tgt_lang @tgt_lang.setter def tgt_lang(self, new_tgt_lang: str) -> None: if "__" not in new_tgt_lang: self._tgt_lang = f"__{new_tgt_lang}__" else: self._tgt_lang = new_tgt_lang self.set_tgt_lang_special_tokens(self._tgt_lang) # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.create_token_type_ids_from_sequences def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def _build_translation_inputs( self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs ): """Used by translation pipeline, to prepare inputs for the generate function""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model.") self.src_lang = src_lang inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs) if "__" not in tgt_lang: tgt_lang = f"__{tgt_lang}__" tgt_lang_id = self.convert_tokens_to_ids(tgt_lang) inputs["forced_bos_token_id"] = tgt_lang_id return inputs def get_vocab(self): vocab = { self.convert_ids_to_tokens(i): i for i in range(self.fairseq_offset, self.vocab_size + self.fairseq_offset) } vocab.update(self.added_tokens_encoder) return vocab @property def unk_token_length(self): return len(self.sp_model.encode(str(self.unk_token))) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor def get_spm_processor(self, from_slow=False): tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs) if self.legacy or from_slow: # no dependency on protobuf tokenizer.Load(self.vocab_file) return tokenizer with open(self.vocab_file, "rb") as f: sp_model = f.read() model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)") model = model_pb2.ModelProto.FromString(sp_model) normalizer_spec = model_pb2.NormalizerSpec() normalizer_spec.add_dummy_prefix = False model.normalizer_spec.MergeFrom(normalizer_spec) sp_model = model.SerializeToString() tokenizer.LoadFromSerializedProto(sp_model) return tokenizer # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize def tokenize(self, text: "TextInput", **kwargs) -> list[str]: """ Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the first token is special. """ if self.legacy or len(text) == 0: return super().tokenize(text, **kwargs) text = text.replace(SPIECE_UNDERLINE, " ") if self.add_prefix_space: text = SPIECE_UNDERLINE + text tokens = super().tokenize(text, **kwargs) if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens: tokens = tokens[1:] return tokens # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize def _tokenize(self, text, **kwargs): """ Returns a tokenized string. 
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`. `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`. """ if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")): return self.sp_model.encode(text, out_type=str) # 1. Encode string + prefix ex: "<unk> Hey" tokens = self.sp_model.encode(self.unk_token + text, out_type=str) # 2. Remove self.unk_token from ['<','unk','>', '▁Hey'] return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" spm_id = self.sp_model.PieceToId(token) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.sp_model.IdToPiece(index - self.fairseq_offset) def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" # since we manually add the prefix space, we have to remove it when decoding if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space: tokens[0] = tokens[0][1:] out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer.prepare_seq2seq_batch with eng_Latn->eng, fra_Latn->fra def prepare_seq2seq_batch( self, src_texts: list[str], src_lang: str = "eng", tgt_texts: Optional[list[str]] = None, tgt_lang: str = "fra", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_input_mode def _switch_to_input_mode(self): return self.set_src_lang_special_tokens(self.src_lang) # Copied from transformers.models.nllb.tokenization_nllb.NllbTokenizer._switch_to_target_mode def _switch_to_target_mode(self): return self.set_tgt_lang_special_tokens(self.tgt_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. 
Prefix=[src_lang_code], suffix = [eos] """ self.cur_lang_code = self.convert_tokens_to_ids(src_lang) self.init_kwargs["src_lang"] = src_lang if self.cur_lang_code == self.unk_token_id: logger.warning_once( f"`src_lang={src_lang}` was not found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id." ) self.prefix_tokens = [self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] # https://github.com/facebookresearch/fairseq2/blob/c53f18e6be6b8b46b722f2249b8397b7eccd7ad3/src/fairseq2/models/nllb/tokenizer.py#L112-L116 def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target lang setting. Prefix=[eos, tgt_lang_code] and suffix=[eos]. """ self.cur_lang_code = self.convert_tokens_to_ids(lang) self.init_kwargs["tgt_lang"] = lang if self.cur_lang_code == self.unk_token_id: logger.warning_once( f"`tgt_lang={lang}` was not found in the vocabulary. Behaviour will probably be unexpected because the language token id will be replaced by the unknown token id." ) self.prefix_tokens = [self.eos_token_id, self.cur_lang_code] self.suffix_tokens = [self.eos_token_id] __all__ = ["SeamlessM4TTokenizer"]
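# --------------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the library API): how the
# prefix/suffix tokens set above wrap a tokenized sequence. Source inputs become
# `[src_lang_code] X [eos]` and target inputs `[eos, tgt_lang_code] X [eos]`, as in
# `build_inputs_with_special_tokens`. The ids below are made up for illustration
# only (except eos, which is id 3 in the fairseq-aligned vocabulary above).
def _illustrate_lang_token_wrapping():
    src_lang_code, tgt_lang_code, eos = 256047, 256057, 3  # hypothetical language-code ids
    token_ids = [10, 11, 12]  # placeholder "content" ids
    encoder_input = [src_lang_code] + token_ids + [eos]
    decoder_input = [eos, tgt_lang_code] + token_ids + [eos]
    return encoder_input, decoder_input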
transformers/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py/0
{ "file_path": "transformers/src/transformers/models/seamless_m4t/tokenization_seamless_m4t.py", "repo_id": "transformers", "token_count": 11101 }
550
# coding=utf-8 # Copyright 2021 NVIDIA The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SegFormer model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput, SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging from .configuration_segformer import SegformerConfig logger = logging.get_logger(__name__) class SegFormerImageClassifierOutput(ImageClassifierOutput): """ Base class for outputs of image classification models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Segformer class SegformerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class SegformerOverlapPatchEmbeddings(nn.Module): """Construct the overlapping patch embeddings.""" def __init__(self, patch_size, stride, num_channels, hidden_size): super().__init__() self.proj = nn.Conv2d( num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=patch_size // 2, ) self.layer_norm = nn.LayerNorm(hidden_size) def forward(self, pixel_values): embeddings = self.proj(pixel_values) _, _, height, width = embeddings.shape # (batch_size, num_channels, height, width) -> (batch_size, num_channels, height*width) -> (batch_size, height*width, num_channels) # this can be fed to a Transformer layer embeddings = embeddings.flatten(2).transpose(1, 2) embeddings = self.layer_norm(embeddings) return embeddings, height, width class SegformerEfficientSelfAttention(nn.Module): """SegFormer's efficient self-attention mechanism. Employs the sequence reduction process introduced in the [PvT paper](https://huggingface.co/papers/2102.12122).""" def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size) self.key = nn.Linear(self.hidden_size, self.all_head_size) self.value = nn.Linear(self.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.sr_ratio = sequence_reduction_ratio if sequence_reduction_ratio > 1: self.sr = nn.Conv2d( hidden_size, hidden_size, kernel_size=sequence_reduction_ratio, stride=sequence_reduction_ratio ) self.layer_norm = nn.LayerNorm(hidden_size) def forward( self, hidden_states, height, width, output_attentions=False, ): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) if self.sr_ratio > 1: batch_size, seq_len, num_channels = hidden_states.shape # Reshape to (batch_size, num_channels, height, width) hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Apply sequence reduction hidden_states = self.sr(hidden_states) # Reshape back to (batch_size, seq_len, num_channels) hidden_states = hidden_states.reshape(batch_size, num_channels, 
-1).permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class SegformerSelfOutput(nn.Module): def __init__(self, config, hidden_size): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class SegformerAttention(nn.Module): def __init__(self, config, hidden_size, num_attention_heads, sequence_reduction_ratio): super().__init__() self.self = SegformerEfficientSelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, ) self.output = SegformerSelfOutput(config, hidden_size=hidden_size) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, hidden_states, height, width, output_attentions=False): self_outputs = self.self(hidden_states, height, width, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class SegformerDWConv(nn.Module): def __init__(self, dim=768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, hidden_states, height, width): batch_size, seq_len, num_channels = hidden_states.shape hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) hidden_states = self.dwconv(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) return hidden_states class SegformerMixFFN(nn.Module): def 
__init__(self, config, in_features, hidden_features=None, out_features=None): super().__init__() out_features = out_features or in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = SegformerDWConv(hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, height, width): hidden_states = self.dense1(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class SegformerLayer(nn.Module): """This corresponds to the Block class in the original implementation.""" def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio): super().__init__() self.layer_norm_1 = nn.LayerNorm(hidden_size) self.attention = SegformerAttention( config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio, ) self.drop_path = SegformerDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states, height, width, output_attentions=False): self_attention_outputs = self.attention( self.layer_norm_1(hidden_states), # in Segformer, layernorm is applied before self-attention height, width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection (with stochastic depth) attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) # second residual connection (with stochastic depth) mlp_output = self.drop_path(mlp_output) layer_output = mlp_output + hidden_states outputs = (layer_output,) + outputs return outputs class SegformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config # stochastic depth decay rule drop_path_decays = [ x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu") ] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( SegformerOverlapPatchEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( SegformerLayer( config, hidden_size=config.hidden_sizes[i], num_attention_heads=config.num_attention_heads[i], drop_path=drop_path_decays[cur + j], sequence_reduction_ratio=config.sr_ratios[i], mlp_ratio=config.mlp_ratios[i], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) # Layer norms self.layer_norm = nn.ModuleList( 
[nn.LayerNorm(config.hidden_sizes[i]) for i in range(config.num_encoder_blocks)] ) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for idx, x in enumerate(zip(self.patch_embeddings, self.block, self.layer_norm)): embedding_layer, block_layer, norm_layer = x # first, obtain patch embeddings hidden_states, height, width = embedding_layer(hidden_states) # second, send embeddings through blocks for i, blk in enumerate(block_layer): layer_outputs = blk(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) # third, apply layer norm hidden_states = norm_layer(hidden_states) # fourth, optionally reshape back to (batch_size, num_channels, height, width) if idx != len(self.patch_embeddings) - 1 or ( idx == len(self.patch_embeddings) - 1 and self.config.reshape_last_stage ): hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class SegformerPreTrainedModel(PreTrainedModel): config: SegformerConfig base_model_prefix = "segformer" main_input_name = "pixel_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class SegformerModel(SegformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = SegformerEncoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. """ ) class SegformerForImageClassification(SegformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.segformer = SegformerModel(config) # Classifier head self.classifier = nn.Linear(config.hidden_sizes[-1], config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SegFormerImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # convert last hidden states to (batch_size, height*width, hidden_size) batch_size = sequence_output.shape[0] if self.config.reshape_last_stage: # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) sequence_output = sequence_output.permute(0, 2, 3, 1) sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1]) # global average pooling sequence_output = sequence_output.mean(dim=1) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SegFormerImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class SegformerMLP(nn.Module): """ Linear Embedding. 
""" def __init__(self, config: SegformerConfig, input_dim): super().__init__() self.proj = nn.Linear(input_dim, config.decoder_hidden_size) def forward(self, hidden_states: torch.Tensor): hidden_states = hidden_states.flatten(2).transpose(1, 2) hidden_states = self.proj(hidden_states) return hidden_states class SegformerDecodeHead(SegformerPreTrainedModel): def __init__(self, config): super().__init__(config) # linear layers which will unify the channel dimension of each of the encoder blocks to the same config.decoder_hidden_size mlps = [] for i in range(config.num_encoder_blocks): mlp = SegformerMLP(config, input_dim=config.hidden_sizes[i]) mlps.append(mlp) self.linear_c = nn.ModuleList(mlps) # the following 3 layers implement the ConvModule of the original implementation self.linear_fuse = nn.Conv2d( in_channels=config.decoder_hidden_size * config.num_encoder_blocks, out_channels=config.decoder_hidden_size, kernel_size=1, bias=False, ) self.batch_norm = nn.BatchNorm2d(config.decoder_hidden_size) self.activation = nn.ReLU() self.dropout = nn.Dropout(config.classifier_dropout_prob) self.classifier = nn.Conv2d(config.decoder_hidden_size, config.num_labels, kernel_size=1) self.config = config def forward(self, encoder_hidden_states: torch.FloatTensor) -> torch.Tensor: batch_size = encoder_hidden_states[-1].shape[0] all_hidden_states = () for encoder_hidden_state, mlp in zip(encoder_hidden_states, self.linear_c): if self.config.reshape_last_stage is False and encoder_hidden_state.ndim == 3: height = width = int(math.sqrt(encoder_hidden_state.shape[-1])) encoder_hidden_state = ( encoder_hidden_state.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() ) # unify channel dimension height, width = encoder_hidden_state.shape[2], encoder_hidden_state.shape[3] encoder_hidden_state = mlp(encoder_hidden_state) encoder_hidden_state = encoder_hidden_state.permute(0, 2, 1) encoder_hidden_state = encoder_hidden_state.reshape(batch_size, -1, height, width) # upsample encoder_hidden_state = nn.functional.interpolate( encoder_hidden_state, size=encoder_hidden_states[0].size()[2:], mode="bilinear", align_corners=False ) all_hidden_states += (encoder_hidden_state,) hidden_states = self.linear_fuse(torch.cat(all_hidden_states[::-1], dim=1)) hidden_states = self.batch_norm(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states) # logits are of shape (batch_size, num_labels, height/4, width/4) logits = self.classifier(hidden_states) return logits @auto_docstring( custom_intro=""" SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. """ ) class SegformerForSemanticSegmentation(SegformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.segformer = SegformerModel(config) self.decode_head = SegformerDecodeHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). 
Examples: ```python >>> from transformers import AutoImageProcessor, SegformerForSemanticSegmentation >>> from PIL import Image >>> import requests >>> image_processor = AutoImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4) >>> list(logits.shape) [1, 150, 128, 128] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if labels is not None and self.config.num_labels < 1: raise ValueError(f"Number of labels should be >=0: {self.config.num_labels}") outputs = self.segformer( pixel_values, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] logits = self.decode_head(encoder_hidden_states) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) if self.config.num_labels > 1: loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) loss = loss_fct(upsampled_logits, labels) elif self.config.num_labels == 1: valid_mask = ((labels >= 0) & (labels != self.config.semantic_loss_ignore_index)).float() loss_fct = BCEWithLogitsLoss(reduction="none") loss = loss_fct(upsampled_logits.squeeze(1), labels.float()) loss = (loss * valid_mask).mean() if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = [ "SegformerDecodeHead", "SegformerForImageClassification", "SegformerForSemanticSegmentation", "SegformerLayer", "SegformerModel", "SegformerPreTrainedModel", ]
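

# A minimal post-processing sketch (an assumed usage pattern, following the docstring example
# above; the tensor below is a stand-in for `outputs.logits`): semantic-segmentation logits come
# out at 1/4 of the input resolution, so upsample them to the original image size before taking
# the per-pixel argmax.
def _example_postprocess_semantic_segmentation():
    import torch

    logits = torch.randn(1, 150, 128, 128)  # stand-in for outputs.logits (ADE20k has 150 classes)
    target_size = (512, 512)  # (height, width) of the original image
    upsampled_logits = torch.nn.functional.interpolate(
        logits, size=target_size, mode="bilinear", align_corners=False
    )
    segmentation_map = upsampled_logits.argmax(dim=1)[0]  # (512, 512) map of class ids
    return segmentation_map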
transformers/src/transformers/models/segformer/modeling_segformer.py/0
{ "file_path": "transformers/src/transformers/models/segformer/modeling_segformer.py", "repo_id": "transformers", "token_count": 13950 }
551
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert SEW checkpoint.""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWDConfig, SEWDForCTC, SEWDModel, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "attention.self.query_proj": "encoder.encoder.layer.*.attention.self.query_proj", "attention.self.key_proj": "encoder.encoder.layer.*.attention.self.key_proj", "attention.self.value_proj": "encoder.encoder.layer.*.attention.self.value_proj", "attention.output.dense": "encoder.encoder.layer.*.attention.output.dense", "attention.output.LayerNorm": "encoder.encoder.layer.*.attention.output.LayerNorm", "intermediate.dense": "encoder.encoder.layer.*.intermediate.dense", "output.dense": "encoder.encoder.layer.*.output.dense", "output.LayerNorm": "encoder.encoder.layer.*.output.LayerNorm", "encoder.encoder.rel_embeddings": "encoder.encoder.rel_embeddings", "encoder.encoder.LayerNorm": "encoder.encoder.LayerNorm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.sew_d.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True else: for key, mapped_key in MAPPING.items(): mapped_key = "sew_d." 
+ mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] if not layer_index.isnumeric(): continue mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "weight" in name: weight_type = "weight" elif "bias" in name: weight_type = "bias" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def convert_config(model, is_finetuned): config = SEWDConfig() if is_finetuned: fs_config = model.w2v_encoder.w2v_model.cfg else: fs_config = model.cfg config.conv_bias = fs_config.conv_bias conv_layers = eval(fs_config.conv_feature_layers) config.conv_dim = [x[0] for x in conv_layers] config.conv_kernel = [x[1] for x in conv_layers] config.conv_stride = [x[2] for x in conv_layers] config.feat_extract_activation = "gelu" config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group" config.final_dropout = 0.0 config.hidden_act = fs_config.activation_fn.name config.hidden_size = fs_config.encoder_embed_dim config.initializer_range = 0.02 config.intermediate_size = fs_config.encoder_ffn_embed_dim config.layer_norm_eps = 1e-5 config.layerdrop = fs_config.encoder_layerdrop config.num_attention_heads = fs_config.encoder_attention_heads config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups config.num_conv_pos_embeddings = fs_config.conv_pos config.num_feat_extract_layers = len(conv_layers) config.num_hidden_layers = fs_config.encoder_layers config.squeeze_factor = fs_config.squeeze_factor # DeBERTa-specific parameters: config.max_position_embeddings = fs_config.max_position_embeddings config.position_buckets = fs_config.position_buckets config.share_att_key = fs_config.share_att_key config.relative_attention = fs_config.relative_attention config.position_biased_input = fs_config.position_biased_input config.pos_att_type = tuple(fs_config.pos_att_type.split("|")) config.norm_rel_ebd = fs_config.norm_rel_ebd # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: fs_config = model.cfg config.final_dropout = fs_config.final_dropout config.layerdrop = fs_config.layerdrop config.activation_dropout = fs_config.activation_dropout config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 config.attention_dropout = fs_config.attention_dropout config.feat_proj_dropout = fs_config.dropout_input config.hidden_dropout = fs_config.dropout config.mask_feature_length = fs_config.mask_channel_length config.mask_feature_prob = fs_config.mask_channel_prob config.mask_time_length = fs_config.mask_length config.mask_time_prob = fs_config.mask_prob config.feature_extractor_type = "Wav2Vec2FeatureExtractor" config.tokenizer_class = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def convert_sew_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ): """ Copy/paste/tweak model's weights to transformers design. 
""" if is_finetuned: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) else: model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path]) if config_path is not None: config = SEWDConfig.from_pretrained(config_path) else: config = convert_config(model[0], is_finetuned) model = model[0].eval() return_attention_mask = config.feat_extract_norm == "layer" feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, ) if is_finetuned: if dict_path: target_dict = Dictionary.load(dict_path) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq target_dict.indices[target_dict.bos_word] = target_dict.pad_index target_dict.indices[target_dict.pad_word] = target_dict.bos_index config.bos_token_id = target_dict.pad_index config.pad_token_id = target_dict.bos_index config.eos_token_id = target_dict.eos_index config.vocab_size = len(target_dict.symbols) vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json") if not os.path.isdir(pytorch_dump_folder_path): logger.error(f"--pytorch_dump_folder_path ({pytorch_dump_folder_path}) should be a directory") return os.makedirs(pytorch_dump_folder_path, exist_ok=True) with open(vocab_path, "w", encoding="utf-8") as vocab_handle: json.dump(target_dict.indices, vocab_handle) tokenizer = Wav2Vec2CTCTokenizer( vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, ) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) processor.save_pretrained(pytorch_dump_folder_path) hf_model = SEWDForCTC(config) else: hf_model = SEWDModel(config) feature_extractor.save_pretrained(pytorch_dump_folder_path) recursively_load_weights(model, hf_model, is_finetuned) hf_model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) args = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
transformers/src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/sew_d/convert_sew_d_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 5959 }
552
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/siglip2/modular_siglip2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_siglip2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Siglip2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Siglip2TextModel`]. It is used to instantiate a Siglip2 text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the text encoder of the Siglip2 [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32000): Vocabulary size of the Siglip2 text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Siglip2Model`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 64): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. pad_token_id (`int`, *optional*, defaults to 1): The id of the padding token in the vocabulary. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the vocabulary. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the vocabulary. 
projection_size (`int`, *optional*, defaults to `hidden_size`): The size of the projection head. Example: ```python >>> from transformers import Siglip2TextConfig, Siglip2TextModel >>> # Initializing a Siglip2TextConfig with google/siglip2-base-patch16-224 style configuration >>> configuration = Siglip2TextConfig() >>> # Initializing a Siglip2TextModel (with random weights) from the google/siglip2-base-patch16-224 style configuration >>> model = Siglip2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "siglip2_text_model" base_config_key = "text_config" def __init__( self, vocab_size=32000, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, max_position_embeddings=64, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, # This differs from `CLIPTokenizer`'s default and from openai/siglip2 # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538 pad_token_id=1, bos_token_id=49406, eos_token_id=49407, projection_size=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.attention_dropout = attention_dropout self.projection_size = projection_size if projection_size is not None else hidden_size class Siglip2VisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Siglip2VisionModel`]. It is used to instantiate a Siglip2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip2 [google/siglip2-base-patch16-naflex](https://huggingface.co/google/siglip2-base-patch16-naflex) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. num_patches (`int`, *optional*, defaults to 256): The number of patches in the image with the size of (`patch_size`, `patch_size`). The image is resized to fill maximum of this number of patches, and to preserve the aspect ratio. In case the resulted number of patches is lower, the image is padded in "patch" dimension. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. 
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. Example: ```python >>> from transformers import Siglip2VisionConfig, Siglip2VisionModel >>> # Initializing a Siglip2VisionConfig with google/siglip2-base-patch16-naflex style configuration >>> configuration = Siglip2VisionConfig() >>> # Initializing a Siglip2VisionModel (with random weights) from the google/siglip2-base-patch16-naflex style configuration >>> model = Siglip2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "siglip2_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, num_patches=256, patch_size=16, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.patch_size = patch_size self.attention_dropout = attention_dropout self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.num_patches = num_patches class Siglip2Config(PretrainedConfig): r""" [`Siglip2Config`] is the configuration class to store the configuration of a [`Siglip2Model`]. It is used to instantiate a Siglip2 model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the Siglip2 [google/siglip2-base-patch16-224](https://huggingface.co/google/siglip2-base-patch16-224) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Siglip2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Siglip2VisionConfig`]. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import Siglip2Config, Siglip2Model >>> # Initializing a Siglip2Config with google/siglip2-base-patch16-224 style configuration >>> configuration = Siglip2Config() >>> # Initializing a Siglip2Model (with random weights) from the google/siglip2-base-patch16-224 style configuration >>> model = Siglip2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> # We can also initialize a Siglip2Config from a Siglip2TextConfig and a Siglip2VisionConfig >>> from transformers import Siglip2TextConfig, Siglip2VisionConfig >>> # Initializing a Siglip2Text and Siglip2Vision configuration >>> config_text = Siglip2TextConfig() >>> config_vision = Siglip2VisionConfig() >>> config = Siglip2Config.from_text_vision_configs(config_text, config_vision) ```""" model_type = "siglip2" sub_configs = {"text_config": Siglip2TextConfig, "vision_config": Siglip2VisionConfig} def __init__(self, text_config=None, vision_config=None, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info("`text_config` is `None`. 
Initializing the `Siglip2TextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `Siglip2VisionConfig` with default values.")

        self.text_config = Siglip2TextConfig(**text_config)
        self.vision_config = Siglip2VisionConfig(**vision_config)

        self.initializer_factor = 1.0


__all__ = ["Siglip2Config", "Siglip2TextConfig", "Siglip2VisionConfig"]
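

# A minimal usage sketch: sub-configs may be passed as plain dicts and are promoted to the
# corresponding config classes defined above; any field left out falls back to the documented
# defaults.
def _example_compose_config():
    config = Siglip2Config(
        text_config={"hidden_size": 512, "num_hidden_layers": 6},
        vision_config={"patch_size": 32},
    )
    assert isinstance(config.text_config, Siglip2TextConfig)
    assert config.text_config.hidden_size == 512
    assert config.vision_config.patch_size == 32
    assert config.vision_config.hidden_size == 768  # default
    return config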
transformers/src/transformers/models/siglip2/configuration_siglip2.py/0
{ "file_path": "transformers/src/transformers/models/siglip2/configuration_siglip2.py", "repo_id": "transformers", "token_count": 4835 }
553
# coding=utf-8 # Copyright 2025 the HuggingFace Inc. team. All rights reserved. # Written by Orr Zohar # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...cache_utils import Cache, DynamicCache from ...generation import GenerationConfig from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, logging from ..idefics3.configuration_idefics3 import Idefics3Config, Idefics3VisionConfig from ..idefics3.image_processing_idefics3 import Idefics3ImageProcessor from ..idefics3.image_processing_idefics3_fast import Idefics3ImageProcessorFast from ..idefics3.modeling_idefics3 import ( Idefics3BaseModelOutputWithPast, Idefics3ForConditionalGeneration, Idefics3Model, Idefics3PreTrainedModel, Idefics3VisionTransformer, ) logger = logging.get_logger(__name__) class SmolVLMVisionConfig(Idefics3VisionConfig): r""" This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration >>> configuration = SmolVLMVisionConfig() >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration >>> model = SmolVLMVisionTransformer(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "smolvlm_vision" pass class SmolVLMPreTrainedModel(Idefics3PreTrainedModel): pass class SmolVLMVisionTransformer(Idefics3VisionTransformer): pass class SmolVLMConfig(Idefics3Config): r""" This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should cache the key/value pairs of the attention mechanism. Only relevant if `config.is_decoder=True`. image_token_id (`int`, *optional*, defaults to 128257): The id of the "image" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether or not to tie the word embeddings with the token embeddings. vision_config (`IdeficsVisionConfig` or `dict`, *optional*, defaults to `IdeficsVisionConfig`): Custom vision config or dict for the vision tower text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`): Custom text config or dict for the text model scale_factor (`int`, *optional*, defaults to 2): The scale factor for the image encoder. pad_token_id (`int`, *optional*, defaults to 128002): The id of the padding token. Example: ```python >>> from transformers import SmolVLMModel, SmolVLMConfig >>> # Initializing configuration >>> configuration = SmolVLMConfig() >>> # Initializing a model from the configuration >>> model = SmolVLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "smolvlm" pass class SmolVLMImageProcessor(Idefics3ImageProcessor): pass class SmolVLMImageProcessorFast(Idefics3ImageProcessorFast): pass class SmolVLMBaseModelOutputWithPast(Idefics3BaseModelOutputWithPast): pass class SmolVLMModel(Idefics3Model): """ A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger in forward. Instead, we override inputs_merger here with custom logic. 
""" def inputs_merger( self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor ): _, patch_size, _ = image_hidden_states.shape if input_ids is None: image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) image_mask = image_mask[..., 0] # slice off the hidden dim else: image_mask = input_ids == self.config.image_token_id num_image_tokens = image_mask.sum(dim=1) if not torch.all(num_image_tokens % patch_size == 0): raise ValueError("At least one sample has <image> tokens not divisible by patch_size.") blocks_per_sample = num_image_tokens // patch_size offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0) block_offset = offsets[:-1] row_cum = image_mask.cumsum(dim=-1) chunk_idx = (row_cum - 1) // patch_size local_idx = (row_cum - 1) % patch_size block_idx = block_offset.unsqueeze(1) + chunk_idx image_embeds = torch.zeros_like(inputs_embeds) image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :] merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds) return merged_embeds def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor = None): """ Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. pixel_attention_mask (`torch.LongTensor`, *optional*): The attention mask indicating padded regions in the image. """ batch_size, num_images, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:]) # Remove padding images - padding images are full 0. nb_values_per_image = pixel_values.shape[1:].numel() real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image if not any(real_images_inds): # no images, leave one empty image. real_images_inds[0] = True pixel_values = pixel_values[real_images_inds].contiguous() # Handle the vision attention mask if pixel_attention_mask is None: pixel_attention_mask = torch.ones( size=[pixel_values.shape[i] for i in (0, 2, 3)], dtype=torch.bool, device=pixel_values.device, ) else: # Remove padding images from the mask pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:]) pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous() patch_size = self.config.vision_config.patch_size patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size) patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size) patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() # Get sequence from the vision encoder image_hidden_states = self.vision_model(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) image_hidden_states = image_hidden_states.last_hidden_state # Modality projection & resampling image_hidden_states = self.connector(image_hidden_states) return image_hidden_states @can_return_tuple @auto_docstring( custom_intro=""" Inputs fed to the model can have an arbitrary number of images. 
To account for this, pixel_values fed to the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where max_num_images is the maximum number of images among the batch_size samples in the batch. Padding images are not needed beyond padding the pixel_values at the entrance of the model. For efficiency, we only pass through the vision_model's forward the real images by discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3. """ ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_attention_mask: Optional[torch.BoolTensor] = None, image_hidden_states: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, SmolVLMBaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.training and self.text_model.gradient_checkpointing and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # retrieve input_ids and inputs_embeds if input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache() if inputs_embeds is None: inputs_embeds = self.text_model.get_input_embeddings()(input_ids).to(input_ids.device) # START VISUAL INPUTS INTEGRATION if pixel_values is not None and image_hidden_states is not None: raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time") if pixel_values is not None: image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask).to(inputs_embeds.device) elif image_hidden_states is not None: image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=inputs_embeds.device) if image_hidden_states is not None: # When we generate, we don't want to replace the potential image_token_id that we generated by images # that simply don't exist inputs_embeds = self.inputs_merger( input_ids=input_ids, inputs_embeds=inputs_embeds, image_hidden_states=image_hidden_states, ) outputs = self.text_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return SmolVLMBaseModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_hidden_states, ) class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration): def __init__(self, config): super().__init__(config) self.model = SmolVLMModel(config) self.model.text_model.generation_config = GenerationConfig.from_model_config(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def forward(self, **super_kwargs): r""" pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*): Mask to avoid performing attention on padding pixel indices. image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The hidden states of the image encoder after modality projection. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or `model.image_token_id`. Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example: ```python >>> import requests >>> import torch >>> from PIL import Image >>> from io import BytesIO >>> from transformers import AutoProcessor, AutoModelForImageTextToText >>> from transformers.image_utils import load_image >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg") >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg") >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg") >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct") >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", dtype=torch.bfloat16, device_map="auto") >>> # Create inputs >>> messages = [ ... { ... "role": "user", ... "content": [ ... {"type": "video", "path": path/to/video}, ... {"type": "text", "text": "What is happening in this video?"}, ... ] ... } ... ] >>> inputs = processor.apply_chat_template([messages], add_generation_prompt=True) >>> # Generate >>> generated_ids = model.generate(**inputs, max_new_tokens=256) >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True) >>> print(generated_texts) ```""" super().forward(**super_kwargs) __all__ = [ "SmolVLMVisionConfig", "SmolVLMConfig", "SmolVLMImageProcessor", "SmolVLMImageProcessorFast", "SmolVLMForConditionalGeneration", "SmolVLMPreTrainedModel", "SmolVLMModel", "SmolVLMVisionTransformer", ]
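

# A toy walk-through (hypothetical token ids and shapes) of the index arithmetic used by
# SmolVLMModel.inputs_merger above: every run of <image> tokens is mapped onto consecutive rows
# of the flat (num_blocks, patch_size, hidden) image_hidden_states tensor via cumulative-sum
# based (block_idx, local_idx) pairs.
def _example_inputs_merger_indices():
    import torch

    image_token_id = 99  # hypothetical id
    patch_size = 2  # tokens produced per image block
    input_ids = torch.tensor(
        [
            [5, 99, 99, 7, 99, 99],  # sample 0: two image blocks
            [99, 99, 3, 4, 5, 6],  # sample 1: one image block
        ]
    )

    image_mask = input_ids == image_token_id
    num_image_tokens = image_mask.sum(dim=1)  # tensor([4, 2])
    blocks_per_sample = num_image_tokens // patch_size  # tensor([2, 1])

    offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
    block_offset = offsets[:-1]  # tensor([0, 2]): where each sample's blocks start

    row_cum = image_mask.cumsum(dim=-1)
    chunk_idx = (row_cum - 1) // patch_size
    local_idx = (row_cum - 1) % patch_size
    block_idx = block_offset.unsqueeze(1) + chunk_idx

    # Only the masked positions are ever read back in inputs_merger.
    assert block_idx[image_mask].tolist() == [0, 0, 1, 1, 2, 2]
    assert local_idx[image_mask].tolist() == [0, 1, 0, 1, 0, 1]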
transformers/src/transformers/models/smolvlm/modular_smolvlm.py/0
{ "file_path": "transformers/src/transformers/models/smolvlm/modular_smolvlm.py", "repo_id": "transformers", "token_count": 7431 }
554
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Speech2Text.""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Optional, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ...utils.import_utils import requires logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } MAX_MODEL_INPUT_SIZES = { "facebook/s2t-small-librispeech-asr": 1024, } MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] LANGUAGES = {"mustc": MUSTC_LANGS} @requires(backends=("sentencepiece",)) class Speech2TextTokenizer(PreTrainedTokenizer): """ Construct an Speech2Text tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. spm_file (`str`): Path to the [SentencePiece](https://github.com/google/sentencepiece) model file bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. do_upper_case (`bool`, *optional*, defaults to `False`): Whether or not to uppercase the output when decoding. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. tgt_lang (`str`, *optional*): A string representing the target language. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
**kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: list[int] = [] def __init__( self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, additional_special_tokens=None, sp_model_kwargs: Optional[dict[str, Any]] = None, **kwargs, ) -> None: self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.do_upper_case = do_upper_case self.do_lower_case = do_lower_case self.encoder = load_json(vocab_file) self.decoder = {v: k for k, v in self.encoder.items()} self.spm_file = spm_file self.sp_model = load_spm(spm_file, self.sp_model_kwargs) if lang_codes is not None: self.lang_codes = lang_codes self.langs = LANGUAGES[lang_codes] self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs] self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs} if additional_special_tokens is not None: additional_special_tokens = self.lang_tokens + additional_special_tokens else: additional_special_tokens = self.lang_tokens self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang) else: self.lang_code_to_id = {} super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, additional_special_tokens=additional_special_tokens, **kwargs, ) @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self) -> dict: vocab = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab @property def tgt_lang(self) -> str: return self._tgt_lang @tgt_lang.setter def tgt_lang(self, new_tgt_lang) -> None: self._tgt_lang = new_tgt_lang self.set_tgt_lang_special_tokens(new_tgt_lang) def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None: """Reset the special tokens to the target language setting. 
prefix=[eos, tgt_lang_code] and suffix=[eos].""" lang_code_id = self.lang_code_to_id[tgt_lang] self.prefix_tokens = [lang_code_id] def _tokenize(self, text: str) -> list[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): return self.encoder.get(token, self.encoder[self.unk_token]) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the decoder.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens: list[str]) -> str: """Converts a sequence of tokens (strings for sub-words) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: decoded = self.sp_model.decode(current_sub_tokens) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " current_sub_tokens = [] else: current_sub_tokens.append(token) decoded = self.sp_model.decode(current_sub_tokens) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id] def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def __getstate__(self) -> dict: state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d: dict) -> None: self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: save_dir = Path(save_directory) assert save_dir.is_dir(), f"{save_directory} should be a directory" vocab_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"] ) spm_save_path = save_dir / ( (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"] ) save_json(self.encoder, vocab_save_path) if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file): copyfile(self.spm_file, spm_save_path) elif not os.path.isfile(self.spm_file): with open(spm_save_path, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (str(vocab_save_path), str(spm_save_path)) def load_spm(path: str, sp_model_kwargs: dict[str, Any]) -> sentencepiece.SentencePieceProcessor: spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs) spm.Load(str(path)) return spm def load_json(path: str) -> Union[dict, list]: with open(path, "r") as f: return json.load(f) def save_json(data, path: str) -> None: with open(path, "w") as f: json.dump(data, f, indent=2) __all__ = ["Speech2TextTokenizer"]
transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/tokenization_speech_to_text.py", "repo_id": "transformers", "token_count": 5017 }
555
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SqueezeBERT model configuration""" from collections import OrderedDict from collections.abc import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SqueezeBERT [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SqueezeBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): pad_token_id (`int`, *optional*, defaults to 0): The ID of the token in the word embedding to use as padding. embedding_size (`int`, *optional*, defaults to 768): The dimension of the word embedding vectors. 
q_groups (`int`, *optional*, defaults to 4): The number of groups in Q layer. k_groups (`int`, *optional*, defaults to 4): The number of groups in K layer. v_groups (`int`, *optional*, defaults to 4): The number of groups in V layer. post_attention_groups (`int`, *optional*, defaults to 1): The number of groups in the first feed forward network layer. intermediate_groups (`int`, *optional*, defaults to 4): The number of groups in the second feed forward network layer. output_groups (`int`, *optional*, defaults to 4): The number of groups in the third feed forward network layer. Examples: ```python >>> from transformers import SqueezeBertConfig, SqueezeBertModel >>> # Initializing a SqueezeBERT configuration >>> configuration = SqueezeBertConfig() >>> # Initializing a model (with random weights) from the configuration above >>> model = SqueezeBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "squeezebert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=768, q_groups=4, k_groups=4, v_groups=4, post_attention_groups=1, intermediate_groups=4, output_groups=4, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.embedding_size = embedding_size self.q_groups = q_groups self.k_groups = k_groups self.v_groups = v_groups self.post_attention_groups = post_attention_groups self.intermediate_groups = intermediate_groups self.output_groups = output_groups # # Copied from transformers.models.bert.configuration_bert.BertOnxxConfig with Bert->SqueezeBert class SqueezeBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] ) __all__ = ["SqueezeBertConfig", "SqueezeBertOnnxConfig"]
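

# An illustration (not used by this configuration module) of what the *_groups values mean in the
# model: SqueezeBERT implements its projections as grouped pointwise (kernel_size=1) convolutions,
# so e.g. q_groups=4 splits the 768 channels into 4 independent groups and cuts the projection's
# parameter count roughly by that factor.
def _example_grouped_projection():
    import torch
    from torch import nn

    hidden = 768
    dense = nn.Conv1d(hidden, hidden, kernel_size=1, groups=1)
    grouped = nn.Conv1d(hidden, hidden, kernel_size=1, groups=4)

    x = torch.randn(2, hidden, 16)  # (batch, channels, sequence)
    assert dense(x).shape == grouped(x).shape == (2, hidden, 16)

    n_dense = sum(p.numel() for p in dense.parameters())  # 768 * 768 + 768 = 590_592
    n_grouped = sum(p.numel() for p in grouped.parameters())  # 768 * 192 + 768 = 148_224
    return n_dense, n_grouped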
transformers/src/transformers/models/squeezebert/configuration_squeezebert.py/0
{ "file_path": "transformers/src/transformers/models/squeezebert/configuration_squeezebert.py", "repo_id": "transformers", "token_count": 2798 }
556
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 Swin Transformer model.""" from __future__ import annotations import collections.abc import math import warnings from collections.abc import Iterable from dataclasses import dataclass from functools import partial from typing import Any, Callable import tensorflow as tf from ...activations_tf import ACT2FN from ...modeling_tf_utils import ( TFPreTrainedModel, TFSequenceClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_swin import SwinConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "SwinConfig" # Base docstring _CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224" _EXPECTED_OUTPUT_SHAPE = [1, 49, 768] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" # drop_path, TFSwinPatchEmbeddings, TFSwinPatchMerging and TFSwinDropPath are tensorflow # implementations of PyTorch functionalities in the timm library. @dataclass class TFSwinEncoderOutput(ModelOutput): """ Swin encoder's outputs, with potential hidden states and attentions. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None reshaped_hidden_states: tuple[tf.Tensor, ...] 
| None = None @dataclass class TFSwinModelOutput(ModelOutput): """ Swin model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`tf.Tensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: tf.Tensor | None = None pooler_output: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None reshaped_hidden_states: tuple[tf.Tensor, ...] | None = None @dataclass class TFSwinMaskedImageModelingOutput(ModelOutput): """ Swin masked image model outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Masked image modeling (MLM) loss. reconstruction (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed pixel values. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: tf.Tensor | None = None reconstruction: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] 
| None = None attentions: tuple[tf.Tensor, ...] | None = None reshaped_hidden_states: tuple[tf.Tensor, ...] | None = None @property def logits(self): warnings.warn( "logits attribute is deprecated and will be removed in version 5 of Transformers." " Please use the reconstruction attribute to retrieve the final output instead.", FutureWarning, ) return self.reconstruction @dataclass class TFSwinImageClassifierOutput(ModelOutput): """ Swin outputs for image classification. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. reshaped_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None reshaped_hidden_states: tuple[tf.Tensor, ...] | None = None def window_partition(input_feature: tf.Tensor, window_size: int) -> tf.Tensor: """ Partitions the given input into windows. """ batch_size, height, width, num_channels = shape_list(input_feature) input_feature = tf.reshape( input_feature, (batch_size, height // window_size, window_size, width // window_size, window_size, num_channels), ) windows = tf.transpose(input_feature, (0, 1, 3, 2, 4, 5)) windows = tf.reshape(windows, (-1, window_size, window_size, num_channels)) return windows def window_reverse(windows: tf.Tensor, window_size: int, height: int, width: int) -> tf.Tensor: """ Merges windows to produce higher resolution features. """ x = tf.shape(windows)[0] y = tf.cast(height * width / (window_size * window_size), tf.int32) batch_size = tf.math.floordiv(x, y) windows = tf.reshape( windows, (batch_size, height // window_size, width // window_size, window_size, window_size, -1) ) windows = tf.transpose(windows, (0, 1, 3, 2, 4, 5)) windows = tf.reshape(windows, (batch_size, height, width, -1)) return windows def drop_path( input: tf.Tensor, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True ) -> tf.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob input_shape = shape_list(input) ndim = len(input_shape) shape = [input_shape[0]] + [1] * (ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = tf.random.uniform(shape) random_tensor = tf.where(random_tensor <= keep_prob, 1.0, 0.0) if keep_prob > 0.0 and scale_by_keep: random_tensor /= keep_prob return input * random_tensor class TFSwinEmbeddings(keras.layers.Layer): """ Construct the patch and position embeddings. Optionally, also the mask token. """ def __init__(self, config: SwinConfig, use_mask_token: bool = False, **kwargs) -> None: super().__init__(**kwargs) self.patch_embeddings = TFSwinPatchEmbeddings(config, name="patch_embeddings") self.num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.embed_dim = config.embed_dim self.use_mask_token = use_mask_token self.use_absolute_embeddings = config.use_absolute_embeddings self.norm = keras.layers.LayerNormalization(name="norm", epsilon=1e-5) self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") self.config = config def build(self, input_shape: tf.TensorShape) -> None: if self.use_mask_token: self.mask_token = self.add_weight(shape=(1, 1, self.embed_dim), initializer="zeros", name="mask_token") else: self.mask_token = None if self.use_absolute_embeddings: self.position_embeddings = self.add_weight( (1, self.num_patches + 1, self.embed_dim), initializer="zeros", name="positional_embeddings" ) else: self.position_embeddings = None if self.built: return self.built = True if getattr(self, "patch_embeddings", None) is not None: with tf.name_scope(self.patch_embeddings.name): self.patch_embeddings.build(None) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build([None, None, self.config.embed_dim]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) def call( self, pixel_values: tf.Tensor, bool_masked_pos: bool | None = None, training: bool = False ) -> tuple[tf.Tensor, tuple[int, int]]: embeddings, output_dimensions = self.patch_embeddings(pixel_values, training=training) embeddings = self.norm(embeddings, training=training) batch_size, seq_len, _ = shape_list(embeddings) if bool_masked_pos is not None: mask_tokens = tf.repeat(self.mask_token, batch_size, 0) mask_tokens = tf.repeat(mask_tokens, seq_len, 1) # replace the masked visual tokens by mask_tokens mask = tf.expand_dims(bool_masked_pos, -1) mask = tf.cast(mask, mask_tokens.dtype) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings, training=training) return embeddings, output_dimensions class TFSwinPatchEmbeddings(keras.layers.Layer): """ Image to Patch Embedding. 
""" def __init__(self, config, **kwargs): super().__init__(**kwargs) image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = keras.layers.Conv2D( filters=hidden_size, kernel_size=self.patch_size, strides=self.patch_size, padding="valid", name="projection", ) def maybe_pad(self, pixel_values: tf.Tensor, height: int, width: int) -> tf.Tensor: if width % self.patch_size[1] != 0: pad_values = ((0, 0), (0, 0), (0, 0), (0, self.patch_size[1] - width % self.patch_size[1])) pixel_values = tf.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = ((0, 0), (0, 0), (0, self.patch_size[0] - height % self.patch_size[0]), (0, 0)) pixel_values = tf.pad(pixel_values, pad_values) return pixel_values def call(self, pixel_values: tf.Tensor, training: bool = False) -> tuple[tf.Tensor, tuple[int, int]]: _, num_channels, height, width = shape_list(pixel_values) if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) # B,C,H,W -> B,H,W,C pixel_values = tf.transpose(pixel_values, (0, 2, 3, 1)) embeddings = self.projection(pixel_values, training=training) # B,H,W,C -> B,C,H,W embeddings = tf.transpose(embeddings, (0, 3, 1, 2)) batch_size, channels, height, width = shape_list(embeddings) output_dimensions = (height, width) embeddings = tf.reshape(embeddings, (batch_size, channels, -1)) embeddings = tf.transpose(embeddings, (0, 2, 1)) return embeddings, output_dimensions def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "projection", None) is not None: with tf.name_scope(self.projection.name): self.projection.build([None, None, None, self.num_channels]) class TFSwinPatchMerging(keras.layers.Layer): """ Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`keras.layer.Layer`, *optional*, defaults to `keras.layers.LayerNormalization`): Normalization layer class. 
""" def __init__( self, input_resolution: tuple[int, int], dim: int, norm_layer: Callable | None = None, **kwargs ) -> None: super().__init__(**kwargs) self.input_resolution = input_resolution self.dim = dim self.reduction = keras.layers.Dense(2 * dim, use_bias=False, name="reduction") if norm_layer is None: # Use same default epsilon as PyTorch self.norm = keras.layers.LayerNormalization(epsilon=1e-5, name="norm") else: self.norm = norm_layer(name="norm") def maybe_pad(self, input_feature: tf.Tensor, height: int, width: int) -> tf.Tensor: should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = ((0, 0), (0, height % 2), (0, width % 2), (0, 0)) input_feature = tf.pad(input_feature, pad_values) return input_feature def call(self, input_feature: tf.Tensor, input_dimensions: tuple[int, int], training: bool = False) -> tf.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, _, num_channels = shape_list(input_feature) input_feature = tf.reshape(input_feature, (batch_size, height, width, num_channels)) # pad input to be divisible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = tf.concat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = tf.reshape( input_feature, (batch_size, -1, 4 * num_channels) ) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature, training=training) input_feature = self.reduction(input_feature, training=training) return input_feature def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "reduction", None) is not None: with tf.name_scope(self.reduction.name): self.reduction.build([None, None, 4 * self.dim]) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build([None, None, 4 * self.dim]) class TFSwinDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: float | None = None, scale_by_keep: bool = True, **kwargs) -> None: super().__init__(**kwargs) self.drop_prob = drop_prob self.scale_by_keep = scale_by_keep def call(self, input: tf.Tensor, training: bool = False) -> tf.Tensor: return drop_path(input, self.drop_prob, training, self.scale_by_keep) class TFSwinSelfAttention(keras.layers.Layer): def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None: super().__init__(**kwargs) if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size window_size = config.window_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), use_bias=config.qkv_bias, name="query", ) self.key = 
keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), use_bias=config.qkv_bias, name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), use_bias=config.qkv_bias, name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) def build(self, input_shape: tf.TensorShape) -> None: self.relative_position_bias_table = self.add_weight( shape=(((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1)), self.num_attention_heads), initializer="zeros", name="relative_position_bias_table", ) self.relative_position_index = self.add_weight( shape=(self.window_size[0] ** 2, self.window_size[1] ** 2), trainable=False, dtype=tf.int32, name="relative_position_index", ) # get pair-wise relative position index for each token inside the window coords_h = tf.range(self.window_size[0]) coords_w = tf.range(self.window_size[1]) coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij")) coords_flatten = tf.reshape(coords, (shape_list(coords)[0], -1)) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = tf.transpose(relative_coords, (1, 2, 0)) stack_0, stack_1 = tf.unstack(relative_coords, axis=2) stack_0 += self.window_size[0] - 1 stack_0 *= 2 * self.window_size[1] - 1 stack_1 += self.window_size[1] - 1 relative_coords = tf.stack([stack_0, stack_1], axis=2) self.relative_position_index.assign(tf.cast(tf.reduce_sum(relative_coords, axis=-1), tf.int32)) if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.all_head_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.all_head_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.all_head_size]) def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: new_x_shape = shape_list(x)[:-1] + [self.num_attention_heads, self.attention_head_size] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool = False, training: bool = False, ) -> tuple[tf.Tensor, ...]: batch_size, dim, _ = shape_list(hidden_states) mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
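        # Note: `hidden_states` has already been split into windows by the calling layer, so the
        # leading dimension is (batch size * number of windows) and `dim` is the number of patches
        # in one window (window_size * window_size), not the hidden size.
        # The learned relative position bias below is broadcast over all windows and added to the
        # raw scores before the optional shifted-window attention mask is applied.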
attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, (0, 1, 3, 2))) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = tf.gather( self.relative_position_bias_table, tf.reshape(self.relative_position_index, (-1,)) ) relative_position_bias = tf.reshape( relative_position_bias, (self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1), ) relative_position_bias = tf.transpose(relative_position_bias, (2, 0, 1)) attention_scores = attention_scores + tf.expand_dims(relative_position_bias, 0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SwinModel call() function) mask_shape = shape_list(attention_mask)[0] attention_scores = tf.reshape( attention_scores, (batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim) ) attention_mask = tf.expand_dims(attention_mask, 1) attention_mask = tf.expand_dims(attention_mask, 0) attention_scores = attention_scores + attention_mask attention_scores = tf.reshape(attention_scores, (-1, self.num_attention_heads, dim, dim)) # Normalize the attention scores to probabilities. attention_probs = tf.nn.softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, (0, 2, 1, 3)) new_context_layer_shape = shape_list(context_layer)[:-2] + [ self.all_head_size, ] context_layer = tf.reshape(context_layer, new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFSwinSelfOutput(keras.layers.Layer): def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(dim, name="dense") self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob, name="dropout") self.dim = dim def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.dim]) if getattr(self, "dropout", None) is not None: with tf.name_scope(self.dropout.name): self.dropout.build(None) class TFSwinAttention(keras.layers.Layer): def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> None: super().__init__(**kwargs) self.self = TFSwinSelfAttention(config, dim, num_heads, name="self") self.self_output = TFSwinSelfOutput(config, dim, name="output") self.pruned_heads = set() def prune_heads(self, heads): """ Prunes heads of the model. 
See base class PreTrainedModel heads: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool = False, training: bool = False, ) -> tf.Tensor: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions, training=training) attention_output = self.self_output(self_outputs[0], hidden_states, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "self_output", None) is not None: with tf.name_scope(self.self_output.name): self.self_output.build(None) class TFSwinIntermediate(keras.layers.Layer): def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(int(config.mlp_ratio * dim), name="dense") if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dim = dim def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.dim]) class TFSwinOutput(keras.layers.Layer): def __init__(self, config: SwinConfig, dim: int, **kwargs) -> None: super().__init__(**kwargs) self.dense = keras.layers.Dense(dim, name="dense") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, "dropout") self.config = config self.dim = dim def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, int(self.config.mlp_ratio * self.dim)]) class TFSwinLayer(keras.layers.Layer): def __init__( self, config, dim, input_resolution: tuple[int, int], num_heads: int, drop_path_rate: float = 0.0, shift_size: int = 0, **kwargs, ) -> None: super().__init__(**kwargs) self.chunk_size_feed_forward = config.chunk_size_feed_forward min_res = tf.reduce_min(input_resolution) self.window_size = min_res if min_res <= config.window_size else config.window_size self.shift_size = 0 if min_res <= self.window_size else shift_size self.input_resolution = input_resolution self.layernorm_before = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_before") self.attention = TFSwinAttention(config, dim, num_heads, name="attention") self.drop_path = ( TFSwinDropPath(drop_path_rate, name="drop_path") if drop_path_rate > 0.0 else keras.layers.Activation("linear", name="drop_path") ) self.layernorm_after = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm_after") self.intermediate = TFSwinIntermediate(config, dim, name="intermediate") self.swin_output = TFSwinOutput(config, dim, name="output") self.dim = dim def get_attn_mask(self, height: int, width: int, 
window_size: int, shift_size: int) -> tf.Tensor | None: img_mask = tf.zeros((height, width)) height_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1)) width_slices = ((0, -window_size), (-window_size, -shift_size), (-shift_size, -1)) # calculate attention mask for SW-MSA if shift_size > 0: count = 0 for height_slice in height_slices: for width_slice in width_slices: height_inds = tf.range(height_slice[0] % height, height_slice[1] % height + 1) width_inds = tf.range(width_slice[0] % width, width_slice[1] % width + 1) indices = tf.reshape(tf.stack(tf.meshgrid(height_inds, width_inds), axis=-1), (-1, 2)) if len(indices) >= 1: updates = tf.ones((len(indices),), dtype=img_mask.dtype) * count img_mask = tf.tensor_scatter_nd_update(img_mask, indices, updates) count += 1 img_mask = tf.expand_dims(img_mask, -1) img_mask = tf.expand_dims(img_mask, 0) mask_windows = window_partition(img_mask, window_size) mask_windows = tf.reshape(mask_windows, (-1, window_size * window_size)) attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2) attn_mask = tf.where(attn_mask != 0, -100.0, attn_mask) attn_mask = tf.where(attn_mask == 0, 0.0, attn_mask) return attn_mask def maybe_pad( self, hidden_states: tf.Tensor, window_size: int, height: int, width: int ) -> tuple[tf.Tensor, tf.Tensor]: pad_right = (window_size - width % window_size) % window_size pad_bottom = (window_size - height % window_size) % window_size pad_values = [[0, 0], [0, pad_bottom], [0, pad_right], [0, 0]] hidden_states = tf.pad(hidden_states, pad_values) pad_values = tf.reshape(pad_values, (-1,)) return hidden_states, pad_values def call( self, hidden_states: tf.Tensor, input_dimensions: tuple[int, int], head_mask: tf.Tensor | None = None, output_attentions: bool = False, training: bool = False, ) -> tf.Tensor: # if window size is larger than input resolution, we don't partition windows min_res = tf.reduce_min(input_dimensions) shift_size = 0 if min_res <= self.window_size else self.shift_size window_size = min_res if min_res <= self.window_size else self.window_size height, width = input_dimensions batch_size, _, channels = shape_list(hidden_states) shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states, training=training) hidden_states = tf.reshape(hidden_states, (batch_size, height, width, channels)) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, window_size, height, width) _, height_pad, width_pad, _ = shape_list(hidden_states) # cyclic shift if shift_size > 0: shifted_hidden_states = tf.roll(hidden_states, shift=(-shift_size, -shift_size), axis=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, window_size) hidden_states_windows = tf.reshape(hidden_states_windows, (-1, window_size * window_size, channels)) attn_mask = self.get_attn_mask( height=height_pad, width=width_pad, window_size=window_size, shift_size=shift_size ) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions, training=training ) attention_output = attention_outputs[0] attention_windows = tf.reshape(attention_output, (-1, window_size, window_size, channels)) shifted_windows = window_reverse(attention_windows, window_size, height_pad, width_pad) # reverse cyclic shift if shift_size > 0: attention_windows = tf.roll(shifted_windows, shift=(shift_size, shift_size), axis=(1, 2)) else: attention_windows = 
shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :] attention_windows = tf.reshape(attention_windows, (batch_size, height * width, channels)) hidden_states = shortcut + self.drop_path(attention_windows, training=training) layer_output = self.layernorm_after(hidden_states, training=training) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.swin_output(layer_output, training=training) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layernorm_before", None) is not None: with tf.name_scope(self.layernorm_before.name): self.layernorm_before.build([None, None, self.dim]) if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) if getattr(self, "layernorm_after", None) is not None: with tf.name_scope(self.layernorm_after.name): self.layernorm_after.build([None, None, self.dim]) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "swin_output", None) is not None: with tf.name_scope(self.swin_output.name): self.swin_output.build(None) class TFSwinStage(keras.layers.Layer): def __init__( self, config: SwinConfig, dim: int, input_resolution: tuple[int, int], depth: int, num_heads: int, drop_path: list[float], downsample: Callable | None, **kwargs, ) -> None: super().__init__(**kwargs) self.config = config self.dim = dim self.blocks = [ TFSwinLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if (i % 2 == 0) else config.window_size // 2, drop_path_rate=drop_path[i], name=f"blocks.{i}", ) for i in range(depth) ] # patch merging layer if downsample is not None: self.downsample = downsample( input_resolution, dim=dim, norm_layer=partial(keras.layers.LayerNormalization, epsilon=1e-5), name="downsample", ) else: self.downsample = None self.pointing = False def call( self, hidden_states: tf.Tensor, input_dimensions: tuple[int, int], head_mask: tf.Tensor | None = None, output_attentions: bool | None = False, training: bool = False, ) -> tuple[tf.Tensor, ...]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training ) hidden_states = layer_outputs[0] if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(layer_outputs[0], input_dimensions, training=training) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "downsample", None) is not None: with tf.name_scope(self.downsample.name): self.downsample.build(None) if getattr(self, "blocks", None) is not None: for layer in self.blocks: with tf.name_scope(layer.name): layer.build(None) class 
TFSwinEncoder(keras.layers.Layer): def __init__(self, config: SwinConfig, grid_size: tuple[int, int], **kwargs): super().__init__(**kwargs) self.num_layers = len(config.depths) self.config = config dpr = list((tf.linspace(0, 1, sum(config.depths)) * config.drop_path_rate).numpy()) self.layers = [ TFSwinStage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=TFSwinPatchMerging if (i_layer < self.num_layers - 1) else None, name=f"layers.{i_layer}", ) for i_layer in range(self.num_layers) ] self.gradient_checkpointing = False def call( self, hidden_states: tf.Tensor, input_dimensions: tuple[int, int], head_mask: tf.Tensor | None = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, training: bool = False, ) -> tuple[tf.Tensor, ...] | TFSwinEncoderOutput: all_input_dimensions = () all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = shape_list(hidden_states) # rearrange b (h w) c -> b c h w reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size)) reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2)) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, training=training ) hidden_states = layer_outputs[0] output_dimensions = layer_outputs[1] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) all_input_dimensions += (input_dimensions,) if output_hidden_states: batch_size, _, hidden_size = shape_list(hidden_states) # rearrange b (h w) c -> b c h w reshaped_hidden_state = tf.reshape(hidden_states, (batch_size, *input_dimensions, hidden_size)) reshaped_hidden_state = tf.transpose(reshaped_hidden_state, (0, 3, 1, 2)) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return TFSwinEncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) class TFSwinPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SwinConfig base_model_prefix = "swin" main_input_name = "pixel_values" SWIN_START_DOCSTRING = r""" This model is a Tensorflow [keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. 
Parameters: config ([`SwinConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SWIN_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ def normalize_data_format(value: str) -> str: """ From tensorflow addons https://github.com/tensorflow/addons/blob/8cec33fcaaf1cf90aec7bdd55a0fcdbb251ce5c2/tensorflow_addons/utils/keras_utils.py#L71 """ if value is None: value = keras.backend.image_data_format() data_format = value.lower() if data_format not in {"channels_first", "channels_last"}: raise ValueError( 'The `data_format` argument must be one of "channels_first", "channels_last". Received: ' + str(value) ) return data_format class AdaptiveAveragePooling1D(keras.layers.Layer): """ Args: Average 1D Pooling with adaptive kernel size. output_size: An integer or tuple/list of a single integer, specifying pooled_features. The new size of output channels. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, channels)` while `channels_first` corresponds to inputs with shape `(batch, channels, steps)`. Input shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch, steps, channels)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch, channels, steps)`. Output shape: - If `data_format='channels_last'`: 3D tensor with shape `(batch_size, pooled_steps, channels)`. - If `data_format='channels_first'`: 3D tensor with shape `(batch_size, channels, pooled_steps)`. 
Adapted from [tensorflow-addon's adaptive pooling.py]( https://github.com/tensorflow/addons/blob/8cec33fcaaf1cf90aec7bdd55a0fcdbb251ce5c2/tensorflow_addons/layers/adaptive_pooling.py#L90-L120 ) """ def __init__( self, output_size: int | Iterable[int], reduce_function: Callable = tf.reduce_mean, data_format: str | None = None, **kwargs, ) -> None: self.data_format = normalize_data_format(data_format) self.reduce_function = reduce_function self.output_size = (output_size,) if isinstance(output_size, int) else tuple(output_size) super().__init__(**kwargs) def call(self, inputs: tf.Tensor, *args) -> None: bins = self.output_size[0] if self.data_format == "channels_last": splits = tf.split(inputs, bins, axis=1) splits = tf.stack(splits, axis=1) out_vect = self.reduce_function(splits, axis=2) else: splits = tf.split(inputs, bins, axis=2) splits = tf.stack(splits, axis=2) out_vect = self.reduce_function(splits, axis=3) return out_vect def compute_output_shape(self, input_shape: Iterable[int]) -> tf.TensorShape: input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == "channels_last": shape = tf.TensorShape([input_shape[0], self.output_size[0], input_shape[2]]) else: shape = tf.TensorShape([input_shape[0], input_shape[1], self.output_size[0]]) return shape def get_config(self) -> dict[str, Any]: config = { "output_size": self.output_size, "data_format": self.data_format, } base_config = super().get_config() return {**base_config, **config} @keras_serializable class TFSwinMainLayer(keras.layers.Layer): config_class = SwinConfig def __init__( self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs ) -> None: super().__init__(**kwargs) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = TFSwinEmbeddings(config, use_mask_token=use_mask_token, name="embeddings") self.encoder = TFSwinEncoder(config, self.embeddings.patch_grid, name="encoder") self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") self.pooler = AdaptiveAveragePooling1D(output_size=(1,)) if add_pooling_layer else None def get_input_embeddings(self) -> TFSwinPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: dict[int, list]): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_head_mask(self, head_mask: Any | None) -> list: if head_mask is not None: raise NotImplementedError return [None] * len(self.config.depths) @unpack_inputs def call( self, pixel_values: tf.Tensor | None = None, bool_masked_pos: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFSwinModelOutput | tuple[tf.Tensor, ...]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask) embedding_output, input_dimensions = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, training=training ) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output, training=training) pooled_output = None if self.pooler is not None: batch_size, _, num_features = shape_list(sequence_output) pooled_output = self.pooler(sequence_output) pooled_output = tf.reshape(pooled_output, (batch_size, num_features)) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return TFSwinModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "layernorm", None) is not None: with tf.name_scope(self.layernorm.name): self.layernorm.build([None, None, self.num_features]) @add_start_docstrings( "The bare Swin Model transformer outputting raw hidden-states without any specific head on top.", SWIN_START_DOCSTRING, ) class TFSwinModel(TFSwinPreTrainedModel): def __init__( self, config: SwinConfig, add_pooling_layer: bool = True, use_mask_token: bool = False, **kwargs ) -> None: super().__init__(config, **kwargs) self.config = config self.swin = TFSwinMainLayer(config, name="swin") @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSwinModelOutput, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, ) @unpack_inputs 
def call( self, pixel_values: tf.Tensor | None = None, bool_masked_pos: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFSwinModelOutput | tuple[tf.Tensor, ...]: r""" bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") swin_outputs = self.swin( pixel_values=pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return swin_outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "swin", None) is not None: with tf.name_scope(self.swin.name): self.swin.build(None) class TFSwinPixelShuffle(keras.layers.Layer): """TF layer implementation of torch.nn.PixelShuffle""" def __init__(self, upscale_factor: int, **kwargs) -> None: super().__init__(**kwargs) if not isinstance(upscale_factor, int) or upscale_factor < 2: raise ValueError(f"upscale_factor must be an integer value >= 2 got {upscale_factor}") self.upscale_factor = upscale_factor def call(self, x: tf.Tensor) -> tf.Tensor: hidden_states = x batch_size, _, _, num_input_channels = shape_list(hidden_states) block_size_squared = self.upscale_factor**2 output_depth = int(num_input_channels / block_size_squared) # When the number of output channels >= 2, PyTorch's PixelShuffle and # TF's depth_to_space differ in their output as the order of channels selected for combining # is a permutation of the other c.f. 
# https://stackoverflow.com/questions/68272502/tf-depth-to-space-not-same-as-torchs-pixelshuffle-when-output-channels-1 permutation = tf.constant( [[i + j * block_size_squared for i in range(block_size_squared) for j in range(output_depth)]] ) hidden_states = tf.gather(params=hidden_states, indices=tf.tile(permutation, [batch_size, 1]), batch_dims=-1) hidden_states = tf.nn.depth_to_space(hidden_states, block_size=self.upscale_factor, data_format="NHWC") return hidden_states class TFSwinDecoder(keras.layers.Layer): def __init__(self, config: SwinConfig, **kwargs): super().__init__(**kwargs) self.conv2d = keras.layers.Conv2D( filters=config.encoder_stride**2 * config.num_channels, kernel_size=1, strides=1, name="0" ) self.pixel_shuffle = TFSwinPixelShuffle(config.encoder_stride, name="1") self.config = config def call(self, x: tf.Tensor) -> tf.Tensor: hidden_states = x # B,C,H,W -> B,H,W,C hidden_states = tf.transpose(hidden_states, (0, 2, 3, 1)) hidden_states = self.conv2d(hidden_states) hidden_states = self.pixel_shuffle(hidden_states) # B,H,W,C -> B,C,H,W hidden_states = tf.transpose(hidden_states, (0, 3, 1, 2)) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "conv2d", None) is not None: with tf.name_scope(self.conv2d.name): self.conv2d.build([None, None, None, self.config.hidden_size]) if getattr(self, "pixel_shuffle", None) is not None: with tf.name_scope(self.pixel_shuffle.name): self.pixel_shuffle.build(None) @add_start_docstrings( "Swin Model with a decoder on top for masked image modeling, as proposed in" " [SimMIM](https://huggingface.co/papers/2111.09886).", SWIN_START_DOCSTRING, ) class TFSwinForMaskedImageModeling(TFSwinPreTrainedModel): def __init__(self, config: SwinConfig): super().__init__(config) self.swin = TFSwinMainLayer(config, add_pooling_layer=False, use_mask_token=True, name="swin") self.decoder = TFSwinDecoder(config, name="decoder") @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC) @unpack_inputs def call( self, pixel_values: tf.Tensor | None = None, bool_masked_pos: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple | TFSwinMaskedImageModelingOutput: r""" bool_masked_pos (`tf.Tensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, TFSwinForMaskedImageModeling >>> import tensorflow as tf >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") >>> model = TFSwinForMaskedImageModeling.from_pretrained("microsoft/swin-tiny-patch4-window7-224") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="tf").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = tf.random.uniform((1, num_patches)) >= 0.5 >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 224, 224] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swin( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] # Reshape to (batch_size, num_channels, height, width) sequence_output = tf.transpose(sequence_output, (0, 2, 1)) batch_size, num_channels, sequence_length = shape_list(sequence_output) height = width = int(sequence_length**0.5) sequence_output = tf.reshape(sequence_output, (batch_size, num_channels, height, width)) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = tf.reshape(bool_masked_pos, (-1, size, size)) mask = tf.repeat(bool_masked_pos, self.config.patch_size, 1) mask = tf.repeat(mask, self.config.patch_size, 2) mask = tf.expand_dims(mask, 1) mask = tf.cast(mask, tf.float32) reconstruction_loss = keras.losses.mean_absolute_error( # Swap axes as metric calculation reduces over the final dimension tf.transpose(pixel_values, (1, 2, 3, 0)), tf.transpose(reconstructed_pixel_values, (1, 2, 3, 0)), ) reconstruction_loss = tf.expand_dims(reconstruction_loss, 0) total_loss = tf.reduce_sum(reconstruction_loss * mask) num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels masked_im_loss = total_loss / num_masked_pixels masked_im_loss = tf.reshape(masked_im_loss, (1,)) if not return_dict: output = (reconstructed_pixel_values,) + outputs[2:] return ((masked_im_loss,) + output) if masked_im_loss is not None else output return TFSwinMaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "swin", None) is not None: with tf.name_scope(self.swin.name): self.swin.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( """ Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. 
""", SWIN_START_DOCSTRING, ) class TFSwinForImageClassification(TFSwinPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config: SwinConfig): super().__init__(config) self.num_labels = config.num_labels self.swin = TFSwinMainLayer(config, name="swin") # Classifier head self.classifier = ( keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else keras.layers.Activation("linear", name="classifier") ) @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSwinImageClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) @unpack_inputs def call( self, pixel_values: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, labels: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor, ...] | TFSwinImageClassifierOutput: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swin( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] logits = self.classifier(pooled_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSwinImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "swin", None) is not None: with tf.name_scope(self.swin.name): self.swin.build(None) if getattr(self, "classifier", None) is not None: if hasattr(self.classifier, "name"): with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.swin.num_features]) __all__ = ["TFSwinForImageClassification", "TFSwinForMaskedImageModeling", "TFSwinModel", "TFSwinPreTrainedModel"]
# File: transformers/src/transformers/models/swin/modeling_tf_swin.py
# coding=utf-8 # Copyright The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Table Transformer model configuration""" from collections import OrderedDict from collections.abc import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class TableTransformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TableTransformerModel`]. It is used to instantiate a Table Transformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Table Transformer [microsoft/table-transformer-detection](https://huggingface.co/microsoft/table-transformer-detection) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: use_timm_backbone (`bool`, *optional*, defaults to `True`): Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] API. backbone_config (`PretrainedConfig` or `dict`, *optional*): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`TableTransformerModel`] can detect in a single image. For COCO, we recommend 100 queries. d_model (`int`, *optional*, defaults to 256): Dimension of the layers. encoder_layers (`int`, *optional*, defaults to 6): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimension of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. 
attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, `True`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. mask_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the Focal loss in the panoptic segmentation loss. dice_loss_coefficient (`float`, *optional*, defaults to 1): Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. 
Examples: ```python >>> from transformers import TableTransformerModel, TableTransformerConfig >>> # Initializing a Table Transformer microsoft/table-transformer-detection style configuration >>> configuration = TableTransformerConfig() >>> # Initializing a model from the microsoft/table-transformer-detection style configuration >>> model = TableTransformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "table-transformer" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } # Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__ def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ): # We default to values which were previously hard-coded in the model. This enables configurability of the config # while keeping the default behavior the same. if use_timm_backbone and backbone_kwargs is None: backbone_kwargs = {} if dilation: backbone_kwargs["output_stride"] = 16 backbone_kwargs["out_indices"] = [1, 2, 3, 4] backbone_kwargs["in_chans"] = num_channels # Backwards compatibility elif not use_timm_backbone and backbone in (None, "resnet50"): if backbone_config is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) backbone = None # set timm attributes to None dilation = None verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.init_xavier_std = init_xavier_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.backbone_kwargs = backbone_kwargs self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.mask_loss_coefficient = mask_loss_coefficient self.dice_loss_coefficient = dice_loss_coefficient self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model @property def sub_configs(self): return ( {"backbone_config": type(self.backbone_config)} if getattr(self, "backbone_config", None) is not None else {} ) # Copied from transformers.models.detr.configuration_detr.DetrOnnxConfig class TableTransformerOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def atol_for_validation(self) -> float: return 1e-5 @property def default_onnx_opset(self) -> int: return 12 __all__ = ["TableTransformerConfig", "TableTransformerOnnxConfig"]
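# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): instantiating TableTransformerConfig with a
# transformers ResNet backbone instead of the default timm one, as described in the
# `use_timm_backbone` / `backbone_config` docstring above. The `out_features`, layer
# counts and query count below are arbitrary example values, not recommended settings.
from transformers import ResNetConfig, TableTransformerConfig, TableTransformerModel

backbone_config = ResNetConfig(out_features=["stage4"])
config = TableTransformerConfig(
    use_timm_backbone=False,
    backbone_config=backbone_config,
    use_pretrained_backbone=False,
    backbone=None,
    num_queries=50,     # fewer detection slots than the default 100
    encoder_layers=4,
    decoder_layers=4,
)
model = TableTransformerModel(config)  # randomly initialized weights

# `hidden_size` and `num_attention_heads` are mapped onto `d_model` and
# `encoder_attention_heads` via `attribute_map` / the properties above.
print(config.hidden_size, config.num_attention_heads)
# ---------------------------------------------------------------------------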
# File: transformers/src/transformers/models/table_transformer/configuration_table_transformer.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration for TimmWrapper models""" from typing import Any, Optional from ...configuration_utils import PretrainedConfig from ...utils import is_timm_available, logging, requires_backends if is_timm_available(): from timm.data import ImageNetInfo, infer_imagenet_subset logger = logging.get_logger(__name__) class TimmWrapperConfig(PretrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to instantiate a timm model according to the specified arguments, defining the model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default imagenet models is set to `None` due to occlusions in the label descriptions. Args: initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. do_pooling (`bool`, *optional*, defaults to `True`): Whether to do pooling for the last_hidden_state in `TimmWrapperModel` or not. model_args (`dict[str, Any]`, *optional*): Additional keyword arguments to pass to the `timm.create_model` function. e.g. `model_args={"depth": 3}` for `timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k` to create a model with 3 blocks. Defaults to `None`. 
Example: ```python >>> from transformers import TimmWrapperModel >>> # Initializing a timm model >>> model = TimmWrapperModel.from_pretrained("timm/resnet18.a1_in1k") >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "timm_wrapper" def __init__( self, initializer_range: float = 0.02, do_pooling: bool = True, model_args: Optional[dict[str, Any]] = None, **kwargs, ): self.initializer_range = initializer_range self.do_pooling = do_pooling self.model_args = model_args # named "model_args" for BC with timm super().__init__(**kwargs) @classmethod def from_dict(cls, config_dict: dict[str, Any], **kwargs): label_names = config_dict.get("label_names") is_custom_model = "num_labels" in kwargs or "id2label" in kwargs # if no labels added to config, use imagenet labeller in timm if label_names is None and not is_custom_model: requires_backends(cls, ["timm"]) imagenet_subset = infer_imagenet_subset(config_dict) if imagenet_subset: dataset_info = ImageNetInfo(imagenet_subset) synsets = dataset_info.label_names() label_descriptions = dataset_info.label_descriptions(as_dict=True) label_names = [label_descriptions[synset] for synset in synsets] if label_names is not None and not is_custom_model: kwargs["id2label"] = dict(enumerate(label_names)) # if all label names are unique, create label2id mapping as well if len(set(label_names)) == len(label_names): kwargs["label2id"] = {name: i for i, name in enumerate(label_names)} else: kwargs["label2id"] = None # timm config stores the `num_classes` attribute in both the root of config and in the "pretrained_cfg" dict. # We are removing these attributes in order to have the native `transformers` num_labels attribute in config # and to avoid duplicate attributes num_labels_in_kwargs = kwargs.pop("num_labels", None) num_labels_in_dict = config_dict.pop("num_classes", None) # passed num_labels has priority over num_classes in config_dict kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict # pop num_classes from "pretrained_cfg", # it is not necessary to have it, only root one is used in timm if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]: config_dict["pretrained_cfg"].pop("num_classes", None) return super().from_dict(config_dict, **kwargs) def to_dict(self) -> dict[str, Any]: output = super().to_dict() output["num_classes"] = self.num_labels output["label_names"] = list(self.id2label.values()) output.pop("id2label", None) output.pop("label2id", None) return output __all__ = ["TimmWrapperConfig"]
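# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): loading a timm checkpoint through the wrapper
# classes so that `model_args` is forwarded to `timm.create_model`, as documented in
# the config docstring above. Requires the `timm` extra to be installed; the checkpoint
# name and the `drop_rate` value are just examples.
from transformers import TimmWrapperConfig, TimmWrapperForImageClassification

config = TimmWrapperConfig.from_pretrained(
    "timm/resnet18.a1_in1k",
    model_args={"drop_rate": 0.1},
)
model = TimmWrapperForImageClassification.from_pretrained("timm/resnet18.a1_in1k", config=config)

# `from_dict` above populated `id2label` from the inferred ImageNet label descriptions.
print(config.num_labels, config.id2label[1])
# ---------------------------------------------------------------------------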
# File: transformers/src/transformers/models/timm_wrapper/configuration_timm_wrapper.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Hubert checkpoint.""" import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, Wav2Vec2FeatureExtractor, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) def convert_classification(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["projector.weight"] model.projector.bias.data = downstream_dict["projector.bias"] model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"] model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"] return model def convert_diarization(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config) model.classifier.weight.data = downstream_dict["model.linear.weight"] model.classifier.bias.data = downstream_dict["model.linear.bias"] return model def convert_xvector(base_model_name, hf_config, downstream_dict): model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config) model.projector.weight.data = downstream_dict["connector.weight"] model.projector.bias.data = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel): model.tdnn[i].kernel.weight.data = downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] model.objective.weight.data = downstream_dict["objective.W"] return model @torch.no_grad() def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path): """ Copy/paste/tweak model's weights to transformers design. 
""" checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True) downstream_dict = checkpoint["Downstream"] hf_config = UniSpeechSatConfig.from_pretrained(config_path) hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( base_model_name, return_attention_mask=True, do_normalize=False ) arch = hf_config.architectures[0] if arch.endswith("ForSequenceClassification"): hf_model = convert_classification(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForAudioFrameClassification"): hf_model = convert_diarization(base_model_name, hf_config, downstream_dict) elif arch.endswith("ForXVector"): hf_model = convert_xvector(base_model_name, hf_config, downstream_dict) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}") if hf_config.use_weighted_layer_sum: hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(model_dump_path) hf_model.save_pretrained(model_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") args = parser.parse_args() convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
# File: transformers/src/transformers/models/unispeech_sat/convert_unispeech_original_s3prl_checkpoint_to_pytorch.py
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from huggingface_hub import hf_hub_download from transformers import ( AddedToken, AutoConfig, AutoTokenizer, VideoLlavaConfig, VideoLlavaForConditionalGeneration, VideoLlavaImageProcessor, VideoLlavaProcessor, ) EPILOG_TXT = """Example: python transformers/src/transformers/models/video_llava/convert_video_llava_weights_to_hf.py --text_model_id lmsys/vicuna-7b-v1.5 --vision_model_id openai/clip-vit-large-patch14 --output_hub_path org/video_llava-7b --old_state_dict_id LanguageBind/Video-LLaVA-7B Example for creating the old state dict file with Python: import torch from video_llava.model.language_model.video_llava import VideoLlavaForCausalLM # load model kwargs = {"device_map": "auto", "dtype": torch.float16} model = VideoLlavaForCausalLM.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", **kwargs) # load vision tower model.get_vision_tower().load_model() # Save state dict torch.save(model.state_dict(), "tmp/hf_models/video_llava-7b/model_state_dict.bin") """ KEYS_TO_MODIFY_MAPPING = { "model.video_tower.video_tower": "video_tower", "model.image_tower.image_tower": "image_tower", "model.mm_projector": "multi_modal_projector", "model": "language_model.model", "lm_head": "language_model.lm_head", "video_tower": "video_tower.vision_model", "image_tower": "image_tower.vision_model", "multi_modal_projector.0": "multi_modal_projector.linear_1", "multi_modal_projector.2": "multi_modal_projector.linear_2", } def convert_state_dict_to_hf(state_dict): new_state_dict = {} for key, value in state_dict.items(): if key.endswith(".inv_freq"): continue for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value return new_state_dict def convert_video_llava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id): torch.set_default_dtype(torch.float16) text_config = AutoConfig.from_pretrained(text_model_id) tokenizer = AutoTokenizer.from_pretrained(text_model_id) tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True) tokenizer.add_tokens(AddedToken("<video>", special=True, normalized=False), special_tokens=True) tokenizer.add_special_tokens({"pad_token": "<pad>"}) tokenizer.padding_side = "left" image_processor = VideoLlavaImageProcessor.from_pretrained(vision_model_id) processor = VideoLlavaProcessor(tokenizer=tokenizer, image_processor=image_processor) config = VideoLlavaConfig(text_config=text_config) config.pad_token_id = 32002 with torch.device("meta"): model = VideoLlavaForConditionalGeneration(config) model_state_dict = set(model.state_dict().keys()) # Pad to 64 for performance reasons pad_shape = 64 state_dict_temp = "pytorch_model-0000{i}-of-00002.bin" for shard in range(1, 3): state_dict_path = hf_hub_download(old_state_dict_id, state_dict_temp.format(i=shard)) state_dict = torch.load(state_dict_path, 
map_location="cpu", weights_only=True) state_dict = convert_state_dict_to_hf(state_dict) model.load_state_dict(state_dict, strict=False, assign=True) model_state_dict -= set(state_dict.keys()) if len(model_state_dict) > 0: raise RuntimeError(f"Missing keys in state dict: {model_state_dict}") pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image and video token so we resize the model model.resize_token_embeddings(config.text_config.vocab_size + 3, pad_shape) model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0])), dim=0, ) model.language_model.lm_head.weight.data[32000:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0])), dim=0, ) model.push_to_hub(output_hub_path) processor.push_to_hub(output_hub_path) def main(): parser = argparse.ArgumentParser( epilog=EPILOG_TXT, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "--text_model_id", help="Hub location of the text model", ) parser.add_argument( "--vision_model_id", help="Hub location of the vision model", ) parser.add_argument( "--output_hub_path", help="Location on the hub of the converted model", ) parser.add_argument( "--old_state_dict_id", help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`", ) args = parser.parse_args() convert_video_llava_llama_to_hf( args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id ) if __name__ == "__main__": main()
# File: transformers/src/transformers/models/video_llava/convert_video_llava_weights_to_hf.py
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for Vilt.""" from typing import Optional, Union from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, get_max_height_width, group_images_by_shape, reorder_images, ) from ...image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, PILImageResampling, SizeDict from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F # Set maximum size based on the typical aspect ratio of the COCO dataset MAX_LONGER_EDGE = 1333 MAX_SHORTER_EDGE = 800 class ViltFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): """ Args: do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image. If `True`, will pad the images in the batch to the largest height and width in the batch. Padding will be applied to the bottom and right with zeros. size_divisor (`int`, *optional*, defaults to 32): The size to make the height and width divisible by. rescale_factor (`float`, *optional*, defaults to 1/255): The factor to rescale the image by. """ do_pad: Optional[bool] size_divisor: Optional[int] rescale_factor: Optional[float] @auto_docstring class ViltImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BICUBIC image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"shortest_edge": 384} do_resize = True do_rescale = True do_normalize = True size_divisor = 32 do_pad = True default_to_square = False model_input_names = ["pixel_values", "pixel_mask"] valid_kwargs = ViltFastImageProcessorKwargs def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], size_divisor: Optional[int], do_pad: bool, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: """ Preprocess an image or batch of images. This method overrides the base class method to include padding and pixel mask generation. 
""" # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(stacked_images, size, interpolation, size_divisor) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) # Handle padding if required data = {} if do_pad: pixel_values, pixel_mask = self._pad_batch( processed_images, return_tensors, disable_grouping=disable_grouping ) data = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} else: # If no padding, just return the processed images if return_tensors == "pt": processed_images = torch.stack(processed_images) data = {"pixel_values": processed_images} return BatchFeature(data=data, tensor_type=return_tensors) def resize( self, images: "torch.Tensor", size: SizeDict, interpolation: Optional["F.InterpolationMode"] = None, size_divisor: Optional[int] = None, ) -> "torch.Tensor": """ Resize an image or batch of images to specified size. Args: images (`torch.Tensor`): Image or batch of images to resize. size (`dict[str, int]`): Size dictionary with shortest_edge key. interpolation (`F.InterpolationMode`, *optional*): Interpolation method to use. size_divisor (`int`, *optional*): Value to ensure height/width are divisible by. Returns: `torch.Tensor`: Resized image or batch of images. """ if interpolation is None: interpolation = self.resample # Resize with aspect ratio preservation shorter = size.shortest_edge longer = int(MAX_LONGER_EDGE / MAX_SHORTER_EDGE * shorter) heights = images.shape[-2] widths = images.shape[-1] # Determine the new dimensions if heights < widths: new_heights = shorter new_widths = widths * (shorter / heights) else: new_heights = heights * (shorter / widths) new_widths = shorter # Check if the longer side exceeds max size if max(new_heights, new_widths) > longer: scale = longer / max(new_heights, new_widths) new_heights = new_heights * scale new_widths = new_widths * scale new_heights = int(new_heights + 0.5) new_widths = int(new_widths + 0.5) # Make dimensions divisible by size_divisor if size_divisor is not None: new_heights = new_heights // size_divisor * size_divisor new_widths = new_widths // size_divisor * size_divisor # Resize the image return F.resize(images, [new_heights, new_widths], interpolation=interpolation) def _pad_batch( self, images: list["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]], disable_grouping: Optional[bool], ) -> tuple: """ Pad a batch of images to the same size based on the maximum dimensions. Args: images (`list[torch.Tensor]`): List of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Returns: `tuple`: Tuple containing padded images and pixel masks. 
""" # Calculate global maximum dimensions across all images max_size = get_max_height_width(images) # Group images by shape before padding grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) processed_images = {} processed_masks = {} for shape, stacked_images in grouped_images.items(): # Create mask template for efficient masking if return_tensors == "pt" and len(stacked_images) > 0: device = stacked_images.device mask_template = torch.zeros(max_size, dtype=torch.int64, device=device) original_size = stacked_images.shape[-2:] needs_padding = original_size[0] != max_size[0] or original_size[1] != max_size[1] if needs_padding: padding_bottom = max_size[0] - original_size[0] padding_right = max_size[1] - original_size[1] padding = [0, 0, padding_right, padding_bottom] padded_images = F.pad(stacked_images, padding, fill=0) pixel_mask = mask_template.clone() pixel_mask[: original_size[0], : original_size[1]].fill_(1) pixel_masks = pixel_mask.unsqueeze(0).repeat(stacked_images.shape[0], 1, 1) else: padded_images = stacked_images pixel_masks = torch.ones( (stacked_images.shape[0], max_size[0], max_size[1]), dtype=torch.int64, device=stacked_images.device, ) # Store processed group processed_images[shape] = padded_images processed_masks[shape] = pixel_masks # Reorder images back to original order padded_images = reorder_images(processed_images, grouped_images_index) pixel_masks = reorder_images(processed_masks, grouped_images_index) # Stack if tensors are requested for final result if return_tensors == "pt" and padded_images: padded_images = torch.stack(padded_images) pixel_masks = torch.stack(pixel_masks) return padded_images, pixel_masks __all__ = ["ViltImageProcessorFast"]
# File: transformers/src/transformers/models/vilt/image_processing_vilt_fast.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow VisionTextDualEncoder model.""" from __future__ import annotations import re import tensorflow as tf from ...configuration_utils import PretrainedConfig from ...modeling_tf_utils import TFPreTrainedModel, keras, unpack_inputs from ...tf_utils import shape_list from ...utils import ( DUMMY_INPUTS, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ..auto.configuration_auto import AutoConfig from ..auto.modeling_tf_auto import TFAutoModel from ..clip.modeling_tf_clip import CLIPVisionConfig, TFCLIPOutput, TFCLIPVisionModel from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "VisionTextDualEncoderConfig" VISION_TEXT_DUAL_ENCODER_START_DOCSTRING = r""" This class can be used to initialize a vision-text dual encoder model with any pretrained vision autoencoding model as the vision encoder and any pretrained text model as the text encoder. The vision and text encoders are loaded via the [`~TFAutoModel.from_pretrained`] method. The projection layers are automatically added to the model and should be fine-tuned on a downstream task, like contrastive image-text modeling. In [LiT: Zero-Shot Transfer with Locked-image Text Tuning](https://huggingface.co/papers/2111.07991) it is shown how leveraging pre-trained (locked/frozen) image and text model for contrastive learning yields significant improvement on new zero-shot vision tasks such as image classification or retrieval. After such a Vision-Text-Dual-Encoder model has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Keras [Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular Keras Model and refer to the TF documentation for all matter related to general usage and behavior. Parameters: config ([`VisionEncoderDecoderConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ VISION_TEXT_DUAL_ENCODER_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ VISION_TEXT_DUAL_ENCODER_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ VISION_TEXT_DUAL_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using an image processor (e.g. if you use ViT as the encoder, you should use [`AutoImageProcessor`]). See [`ViTImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.clip.modeling_tf_clip.contrastive_loss def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: return tf.math.reduce_mean( keras.metrics.sparse_categorical_crossentropy( y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True ) ) # Copied from transformers.models.clip.modeling_tf_clip.clip_loss def clip_loss(similarity: tf.Tensor) -> tf.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(tf.transpose(similarity)) return (caption_loss + image_loss) / 2.0 @add_start_docstrings(VISION_TEXT_DUAL_ENCODER_START_DOCSTRING) class TFVisionTextDualEncoderModel(TFPreTrainedModel): config_class = VisionTextDualEncoderConfig base_model_prefix = "vision_text_dual_encoder" load_weight_prefix = "tf_vision_text_dual_encoder_model" def __init__( self, config: VisionTextDualEncoderConfig | None = None, vision_model: TFPreTrainedModel | None = None, text_model: TFPreTrainedModel | None = None, ): if config is None and (vision_model is None or text_model is None): raise ValueError("Either a configuration or an vision and a text model has to be provided") if config is None: config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config) else: if not isinstance(config, self.config_class): raise ValueError(f"config: {config} has to be of type {self.config_class}") # initialize with config super().__init__(config) if vision_model is None: if isinstance(config.vision_config, CLIPVisionConfig): vision_model = TFCLIPVisionModel.from_config(config.vision_config, name="vision_model") else: vision_model = TFAutoModel.from_config(config.vision_config, name="vision_model") if text_model is None: text_model = TFAutoModel.from_config(config.text_config, name="text_model") self.vision_model = vision_model self.text_model = text_model # make sure that the individual model's config refers to the shared config # so that the updates to the config will be synced self.vision_model.config = self.config.vision_config self.text_model.config = self.config.text_config self.vision_embed_dim = config.vision_config.hidden_size self.text_embed_dim = config.text_config.hidden_size self.projection_dim = config.projection_dim self.visual_projection = keras.layers.Dense(self.projection_dim, use_bias=False, name="visual_projection") self.text_projection = keras.layers.Dense(self.projection_dim, use_bias=False, name="text_projection") self.logit_scale = None self.config = config def build(self, input_shape=None): if self.built: return self.built = True # Build in the build() method to make sure the names are right initializer = keras.initializers.Constant(self.config.logit_scale_init_value) self.logit_scale = self.add_weight(shape=(1,), initializer=initializer, name="logit_scale") if getattr(self, "visual_projection", None) is not None: with tf.name_scope(self.visual_projection.name): self.visual_projection.build([None, None, self.vision_embed_dim]) if getattr(self, "text_projection", None) is not None: with tf.name_scope(self.text_projection.name): self.text_projection.build([None, None, self.text_embed_dim]) with tf.name_scope(self.vision_model.name): self.vision_model.build(None) with tf.name_scope(self.text_model.name): self.text_model.build(None) def tf_to_pt_weight_rename(self, tf_weight): # Matt: The TF and PT weights don't align because our TF base 
classes have an extra layer compared to PT models # (the main model stem is in the MainLayer class). If we remove that layer, then weight names sync up as normal. # However, the name of that extra layer is the name of the MainLayer in the base model. if "vision_model" in tf_weight: if tf_weight.count("vision_model") == 1: return (re.sub(r"vision_model\..*?\.", "vision_model.", tf_weight),) elif tf_weight.count("vision_model") == 2: return (re.sub(r"vision_model\..*?\.vision_model", "vision_model.vision_model", tf_weight),) else: raise ValueError( f"Unexpected weight name {tf_weight}. Please file an issue on the" " Transformers repo to let us know about this error!" ) elif "text_model" in tf_weight: return (re.sub(r"text_model\..*?\.", "text_model.", tf_weight),) else: return (tf_weight,) @add_start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids=None, attention_mask=None, position_ids=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`]. Examples: ```python >>> from transformers import TFVisionTextDualEncoderModel, AutoTokenizer >>> model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True) >>> tokenizer = AutoTokenizer.from_pretrained("clip-italian/clip-italian") >>> inputs = tokenizer(["una foto di un gatto", "una foto di un cane"], padding=True, return_tensors="np") >>> text_features = model.get_text_features(**inputs) ```""" text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import TFVisionTextDualEncoderModel, AutoImageProcessor >>> model = TFVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", from_pt=True) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = image_processor(images=image, return_tensors="np") >>> image_features = model.get_image_features(**inputs) ```""" vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @unpack_inputs @add_start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFCLIPOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, return_loss: bool | None = None, token_type_ids: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFCLIPOutput: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import ( ... TFVisionTextDualEncoderModel, ... VisionTextDualEncoderProcessor, ... AutoImageProcessor, ... AutoTokenizer, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") >>> processor = VisionTextDualEncoderProcessor(image_processor, tokenizer) >>> model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( ... "google/vit-base-patch16-224", "google-bert/bert-base-uncased" ... ) >>> # contrastive training >>> urls = [ ... "http://images.cocodataset.org/val2017/000000039769.jpg", ... "https://farm3.staticflickr.com/2674/5850229113_4fe05d5265_z.jpg", ... ] >>> images = [Image.open(requests.get(url, stream=True).raw) for url in urls] >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=images, return_tensors="np", padding=True ... ) >>> outputs = model( ... input_ids=inputs.input_ids, ... attention_mask=inputs.attention_mask, ... pixel_values=inputs.pixel_values, ... return_loss=True, ... 
) >>> loss, logits_per_image = outputs.loss, outputs.logits_per_image # this is the image-text similarity score >>> # save and load from pretrained >>> model.save_pretrained("vit-bert") >>> model = TFVisionTextDualEncoderModel.from_pretrained("vit-bert") >>> # inference >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) image_embeds = vision_outputs[1] # pooler_output image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] # pooler_output text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = tf.math.exp(self.logit_scale) logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale logits_per_image = tf.transpose(logits_per_text) loss = None if return_loss: loss = clip_loss(logits_per_text) if loss.shape.rank == 0: loss = tf.expand_dims(loss, 0) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return TFCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) @classmethod def from_vision_text_pretrained( cls, vision_model_name_or_path: str | None = None, text_model_name_or_path: str | None = None, *model_args, **kwargs, ) -> TFPreTrainedModel: """ Params: vision_model_name_or_path (`str`, *optional*, defaults to `None`): Information necessary to initiate the vision model. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. text_model_name_or_path (`str`, *optional*): Information necessary to initiate the text model. Can be either: - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - A path to a *directory* containing model weights saved using [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`. - A path or url to a *PyTorch checkpoint folder* (e.g, `./pt_model`). In this case, `from_pt` should be set to `True` and a configuration object should be provided as `config` argument. 
model_args (remaining positional arguments, *optional*): All remaining positional arguments will be passed to the underlying model's `__init__` method. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). - To update the text configuration, use the prefix *text_* for each configuration parameter. - To update the vision configuration, use the prefix *vision_* for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a `config` is provided or automatically loaded. Example: ```python >>> from transformers import TFVisionTextDualEncoderModel >>> # initialize a model from pretrained ViT and BERT models. Note that the projection layers will be randomly initialized. >>> model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( ... "google/vit-base-patch16-224", "google-bert/bert-base-uncased" ... ) >>> # saving model after fine-tuning >>> model.save_pretrained("./vit-bert") >>> # load fine-tuned model >>> model = TFVisionTextDualEncoderModel.from_pretrained("./vit-bert") ```""" kwargs_vision = { argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_") } kwargs_text = { argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_") } # remove vision, text kwargs from kwargs for key in kwargs_vision: del kwargs["vision_" + key] for key in kwargs_text: del kwargs["text_" + key] # Load and initialize the vision and text model vision_model = kwargs_vision.pop("model", None) if vision_model is None: if vision_model_name_or_path is None: raise ValueError( "If `vision_model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" ) kwargs_vision["name"] = "vision_model" kwargs_vision["load_weight_prefix"] = cls.load_weight_prefix vision_config_dict, unused_args = PretrainedConfig.get_config_dict(vision_model_name_or_path, **kwargs) if vision_config_dict.get("model_type", None) == "clip_vision_model": vision_config = CLIPVisionConfig.from_dict(vision_config_dict) else: vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) if vision_config.model_type == "clip_vision_model": kwargs_vision["config"] = vision_config vision_class = TFCLIPVisionModel elif vision_config.model_type == "clip": kwargs_vision["config"] = vision_config.vision_config vision_class = TFCLIPVisionModel else: kwargs_vision["config"] = vision_config vision_class = TFAutoModel vision_model = vision_class.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision) text_model = kwargs_text.pop("model", None) if text_model is None: if text_model_name_or_path is None: raise ValueError( "If `text_model` is not defined as an argument, a `text_model_name_or_path` has to be defined" ) kwargs_text["name"] = "text_model" kwargs_text["load_weight_prefix"] = cls.load_weight_prefix if "config" not in kwargs_text: text_config = AutoConfig.from_pretrained(text_model_name_or_path) kwargs_text["config"] = text_config text_model = TFAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text) # instantiate config with corresponding kwargs config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config, **kwargs) # init model model = cls(config=config, vision_model=vision_model, text_model=text_model) # the 
projection layers are always newly initialized when loading the model # using pre-trained vision and text model. logger.warning( "The projection layer and logit scale weights `['visual_projection.weight', 'text_projection.weight'," " 'logit_scale']` are newly initialized. You should probably TRAIN this model on a down-stream task to be" " able to use it for predictions and inference." ) if vision_model.name != "vision_model": raise ValueError("vision model must be created with the name `vision_model`.") if text_model.name != "text_model": raise ValueError("text model must be created with the name `text_model`.") model.build_in_name_scope() # Ensure model is fully built return model @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: `dict[str, tf.Tensor]`: The dummy inputs. """ input_ids = tf.constant(DUMMY_INPUTS, dtype=tf.int32) batch_size, seq_len = input_ids.shape VISION_DUMMY_INPUTS = tf.random.uniform( shape=( batch_size, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size, ), dtype=tf.float32, ) pixel_values = tf.constant(VISION_DUMMY_INPUTS) dummy = {"pixel_values": pixel_values, "input_ids": input_ids} return dummy __all__ = ["TFVisionTextDualEncoderModel"]
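# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original modeling file): the call()
# above boils down to a scaled cosine-similarity matrix between L2-normalized
# text and image embeddings. The dummy sizes below (2 texts, 2 images,
# projection_dim=512) and the temperature 2.6592 (the CLIP-style default
# `logit_scale_init_value`) are assumptions for demonstration only.
import tensorflow as tf

text_embeds = tf.random.normal((2, 512))   # (num_texts, projection_dim)
image_embeds = tf.random.normal((2, 512))  # (num_images, projection_dim)

# normalize, exactly as done in call()
text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True)
image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True)

logit_scale = tf.math.exp(tf.constant(2.6592))
logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
logits_per_image = tf.transpose(logits_per_text)

probs = tf.nn.softmax(logits_per_image, axis=1)  # per-image probabilities over the texts
print(probs.shape)  # (2, 2)
# ---------------------------------------------------------------------------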
transformers/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py/0
{ "file_path": "transformers/src/transformers/models/vision_text_dual_encoder/modeling_tf_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 11772 }
563
# coding=utf-8 # Copyright 2021 Google AI, Ross Wightman, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ViT model.""" import collections.abc import math from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput, MaskedImageModelingOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging, torch_int from .configuration_vit import ViTConfig logger = logging.get_logger(__name__) class ViTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. Optionally, also the mask token. """ def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None: super().__init__() self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward( self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) if bool_masked_pos is not None: seq_length = embeddings.shape[1] mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings class ViTPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." f" Expected {self.num_channels} but got {num_channels}." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling # Normalize the attention scores to probabilities. attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) # Mask heads if we want to if attention_mask is not None: attn_weights = attn_weights * attention_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class ViTSelfAttention(nn.Module): def __init__(self, config: ViTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class ViTSelfOutput(nn.Module): """ The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: ViTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class ViTAttention(nn.Module): def __init__(self, config: ViTConfig) -> None: super().__init__() self.attention = ViTSelfAttention(config) self.output = ViTSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class ViTIntermediate(nn.Module): def __init__(self, config: ViTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class ViTOutput(nn.Module): def __init__(self, config: ViTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class ViTLayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: ViTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ViTAttention(config) self.intermediate = ViTIntermediate(config) self.output = ViTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_attention_outputs = 
self.attention( self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs class ViTEncoder(nn.Module): def __init__(self, config: ViTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class ViTPreTrainedModel(PreTrainedModel): config: ViTConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["ViTEmbeddings", "ViTLayer"] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, ViTEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) module.cls_token.data = nn.init.trunc_normal_( module.cls_token.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.cls_token.dtype) if module.mask_token is not None: module.mask_token.data.zero_() @auto_docstring class ViTModel(ViTPreTrainedModel): def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False): r""" add_pooling_layer (bool, *optional*, defaults to 
`True`): Whether to add a pooling layer use_mask_token (`bool`, *optional*, defaults to `False`): Whether to use a mask token for masked image modeling. """ super().__init__(config) self.config = config self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token) self.encoder = ViTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = ViTPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> ViTPatchEmbeddings: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None: """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # TODO: maybe have a cleaner way to cast the input (from `ImageProcessor` side?) 
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype if pixel_values.dtype != expected_dtype: pixel_values = pixel_values.to(expected_dtype) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class ViTPooler(nn.Module): def __init__(self, config: ViTConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.pooler_output_size) self.activation = ACT2FN[config.pooler_act] def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @auto_docstring( custom_intro=""" ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886). <Tip> Note that we provide a script to pre-train this model on custom data in our [examples directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). </Tip> """ ) class ViTForMaskedImageModeling(ViTPreTrainedModel): def __init__(self, config: ViTConfig) -> None: super().__init__(config) self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True) self.decoder = nn.Sequential( nn.Conv2d( in_channels=config.hidden_size, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1, ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedImageModelingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
Examples: ```python >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 224, 224] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride): raise ValueError( "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that " "the reconstructed image has the same dimensions as the input. " f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}." ) outputs = self.vit( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] # Reshape to (batch_size, num_channels, height, width) sequence_output = sequence_output[:, 1:] batch_size, sequence_length, num_channels = sequence_output.shape height = width = math.floor(sequence_length**0.5) sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = ( bool_masked_pos.repeat_interleave(self.config.patch_size, 1) .repeat_interleave(self.config.patch_size, 2) .unsqueeze(1) .contiguous() ) reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none") masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels if not return_dict: output = (reconstructed_pixel_values,) + outputs[1:] return ((masked_im_loss,) + output) if masked_im_loss is not None else output return MaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
</Tip> """ ) class ViTForImageClassification(ViTPreTrainedModel): def __init__(self, config: ViTConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.vit = ViTModel(config, add_pooling_layer=False) # Classifier head self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.vit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel"]
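# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original modeling_vit.py): a shape
# walk-through of ViTModel with a randomly initialized config. A 224x224 image
# split into 16x16 patches gives (224 // 16) ** 2 = 196 patch tokens, plus the
# [CLS] token, so the encoder sequence length is 197. No checkpoint is
# downloaded; all weights are random.
import torch
from transformers import ViTConfig, ViTModel

config = ViTConfig(image_size=224, patch_size=16, hidden_size=768)
model = ViTModel(config)
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values)

print(outputs.last_hidden_state.shape)  # torch.Size([1, 197, 768])
print(outputs.pooler_output.shape)      # torch.Size([1, 768])
# ---------------------------------------------------------------------------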
transformers/src/transformers/models/vit/modeling_vit.py/0
{ "file_path": "transformers/src/transformers/models/vit/modeling_vit.py", "repo_id": "transformers", "token_count": 14499 }
564
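# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original modeling_vit.py): end-to-end
# inference with ViTForImageClassification. The checkpoint name
# "google/vit-base-patch16-224" (the ImageNet-1k fine-tuned variant of the
# "-in21k" checkpoint used in the docstrings above) is an assumption for this
# example.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ViTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head

predicted_class_idx = logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])
# ---------------------------------------------------------------------------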
# coding=utf-8 # Copyright 2025 Sesame and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io from typing import Optional, Union from ...utils import is_mistral_common_available, is_soundfile_available, is_torch_available, logging if is_torch_available(): import torch if is_soundfile_available(): import soundfile as sf if is_mistral_common_available(): from mistral_common.protocol.transcription.request import TranscriptionRequest from ...audio_utils import AudioInput, load_audio_as, make_list_of_audio from ...feature_extraction_utils import BatchFeature from ...processing_utils import AllKwargsForChatTemplate, AudioKwargs, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput logger = logging.get_logger(__name__) class VoxtralAudioKwargs(AudioKwargs, total=False): max_source_positions: Optional[int] class VoxtralProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": True, }, "audio_kwargs": { "sampling_rate": 16000, "padding": True, "truncation": False, "pad_to_multiple_of": 480000, "max_source_positions": 3000, }, "common_kwargs": { "return_tensors": "pt", "return_dict": True, "tokenize": True, }, } class VoxtralProcessor(ProcessorMixin): r""" Constructs a Voxtral processor which wraps [`WhisperFeatureExtractor`] and [`MistralCommonTokenizer`] into a single processor that inherits both the audio feature extraction and tokenizer functionalities. Args: feature_extractor ([`WhisperFeatureExtractor`]): The feature extractor is a required input. tokenizer ([`MistralCommonTokenizer`]): The tokenizer is a required input. """ attributes = ["feature_extractor", "tokenizer"] feature_extractor_class = "WhisperFeatureExtractor" tokenizer_class = "MistralCommonTokenizer" def __init__( self, feature_extractor, tokenizer, ): self.audio_token_id = 24 self.audio_token = tokenizer.convert_ids_to_tokens(self.audio_token_id) super().__init__(feature_extractor, tokenizer) def _retreive_input_features(self, audio, max_source_positions, **kwargs): """ Handles specific logic of Voxtral expected input features: audio arrays should be padded to next multiple of 480000 (duration is a multiple of 30s), see VoxtralProcessorKwargs' default audio_kwargs. Then mel input features are extracted and stacked along batch dimension, splitting into chunks of max_source_positions. 
""" input_features_list = [] for audio_array in audio: audio_inputs = self.feature_extractor(audio_array, **kwargs) # let's split into chunks of max_source_positions, and then stack them along batch dimension input_features = audio_inputs["input_features"].reshape( self.feature_extractor.feature_size, -1, max_source_positions ) input_features_list.append(input_features.transpose(0, 1)) return torch.cat(input_features_list) def apply_chat_template( self, conversation: Union[list[dict[str, str]], list[list[dict[str, str]]]], **kwargs: Unpack[AllKwargsForChatTemplate], ) -> str: """ This method applies the model's chat completion template given a conversation. It relies on MistralCommonTokenizer's [`~MistralCommonTokenizer.apply_chat_template`] to prepare input ids to the model and on WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] to prepare input features to the model. Note that audio is padded to the nearest 30-second multiple prior to mel feature extraction. A `conversation` is a list of messages, where each message is a dictionary with a `role` and a `content` field. For Voxtral, `role` can be `"user"` or `"assistant"`. The `content` field can be a string or a list of dictionaries with a `type` field. See example below. ```python from huggingface_hub import hf_hub_download from transformers.audio_utils import load_audio_as audio_url = "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/bcn_weather.mp3" audio_path = hf_hub_download(repo_id="hf-internal-testing/dummy-audio-samples", filename="bcn_weather.mp3", repo_type="dataset") audio_base64 = load_audio_as(audio_path, return_format="base64", force_mono=True) # audio + text conversation = [ { "role": "user", "content": [ {"type": "audio", "url": audio_url}, {"type": "audio", "path": audio_path}, {"type": "audio", "base64": audio_base64}, {"type": "text", "text": "How many audio do you hear?"}, ], }, ] processor = VoxtralProcessor.from_pretrained("mistralai/Voxtral-Mini-3B-2507") inputs = processor.apply_chat_template(conversation) ``` Args: conversation (`Union[list[Dict, [str, str]], list[list[dict[str, str]]]]`): The conversation to format. """ if kwargs.get("continue_final_message", False): if kwargs.get("add_generation_prompt", False): raise ValueError( "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead." 
) if kwargs.get("return_assistant_tokens_mask", False): raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.") # Fill sets of kwargs that should be used by different parts of template processed_kwargs = { "mm_load_kwargs": {}, "template_kwargs": {}, } for kwarg_type in processed_kwargs: for key in AllKwargsForChatTemplate.__annotations__[kwarg_type].__annotations__: kwarg_type_defaults = AllKwargsForChatTemplate.__annotations__[kwarg_type] default_value = getattr(kwarg_type_defaults, key, None) value = kwargs.pop(key, default_value) if value is not None and not isinstance(value, dict): processed_kwargs[kwarg_type][key] = value # Pass unprocessed custom kwargs processed_kwargs["template_kwargs"].update(kwargs) if isinstance(conversation, (list, tuple)) and ( isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "content") ): is_batched = True conversations = conversation else: is_batched = False conversations = [conversation] # Check for any overlapping keys between mm_load_kwargs and kwargs mm_load_kwargs = processed_kwargs["mm_load_kwargs"] if any(key in kwargs for key in mm_load_kwargs): overlapping_keys = [key for key in mm_load_kwargs if key in kwargs] logger.warning( f"{overlapping_keys[0] if len(overlapping_keys) == 1 else ', '.join(overlapping_keys)} load multimodal data kwarg{'s' if len(overlapping_keys) > 1 else ''} {'have' if len(overlapping_keys) > 1 else 'has'} been passed to the processor, but {'they are' if len(overlapping_keys) > 1 else 'it is'} not supported for VoxtralProcessor since it relies on mistral_common directly. {'They' if len(overlapping_keys) > 1 else 'It'} will be ignored." ) output_kwargs = self._merge_kwargs( VoxtralProcessorKwargs, **kwargs, ) text_kwargs = output_kwargs["text_kwargs"] audio_kwargs = output_kwargs["audio_kwargs"] common_kwargs = output_kwargs["common_kwargs"] return_tensors = common_kwargs.pop("return_tensors", None) if return_tensors != "pt": raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.") tokenizer_kwargs = {**processed_kwargs["template_kwargs"], **text_kwargs} tokenizer_kwargs["return_tensors"] = None # let's not return tensors here tokenize = tokenizer_kwargs.pop("tokenize", False) return_dict = tokenizer_kwargs.pop("return_dict", False) encoded_instruct_inputs = self.tokenizer.apply_chat_template( conversations, tokenize=tokenize, return_dict=return_dict, **tokenizer_kwargs, ) if tokenize: if return_dict: audio = encoded_instruct_inputs.pop("audio", None) data = dict(encoded_instruct_inputs) if audio is not None: max_source_positions = audio_kwargs.pop("max_source_positions") data["input_features"] = self._retreive_input_features(audio, max_source_positions, **audio_kwargs) return BatchFeature(data=data, tensor_type=return_tensors) if not is_batched: return encoded_instruct_inputs[0] return encoded_instruct_inputs def __call__( self, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]], **kwargs: Unpack[VoxtralProcessorKwargs], ): r""" Method to prepare text to be fed as input to the model. This method forwards the `text` arguments to MistralCommonTokenizer's [`~MistralCommonTokenizer.__call__`] to encode the text. Please refer to the docstring of the above methods for more information. This methods does not support audio. To prepare the audio, please use: 1. `apply_chat_template` [`~VoxtralProcessor.apply_chat_template`] method. 2. 
`apply_transcription_request` [`~VoxtralProcessor.apply_transcription_request`] method. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **input_features** -- List of audio values to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). """ if isinstance(text, str): text = [text] if any(self.audio_token in t for t in text): raise ValueError( f"{self.audio_token} is present in the provided text which is not supported by VoxtralProcessor. Please use the `apply_chat_template` method instead." ) output_kwargs = self._merge_kwargs( VoxtralProcessorKwargs, **kwargs, ) text_kwargs = output_kwargs["text_kwargs"] common_kwargs = output_kwargs["common_kwargs"] out = self.tokenizer(text, **text_kwargs) return BatchFeature(data=out, tensor_type=common_kwargs.pop("return_tensors", None)) # TODO: @eustlb, this should be moved to mistral_common + testing def apply_transcription_request( self, language: Union[str, list[str]], audio: Union[str, list[str], AudioInput], model_id: str, sampling_rate: Optional[int] = None, format: Optional[Union[str, list[str]]] = None, **kwargs: Unpack[VoxtralProcessorKwargs], ): """ This method applies the model's transcription request template given a language and audio. It relies on MistralCommonTokenizer and WhisperFeatureExtractor to prepare input ids and input features to the model. ```python from transformers import VoxtralProcessor model_id = "mistralai/Voxtral-Mini-3B-2507" processor = VoxtralProcessor.from_pretrained(model_id) language = "en" audio = "https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/obama.mp3" inputs = processor.apply_transcription_request(language=language, audio=audio, model_id=model_id) ``` Args: language (`str`, `list[str]`): The language or languages of the audio. If provided as a string, will be applied uniformly to all audio. If provided as a list, will be applied to each audio individually with a one-to-one mapping. audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The audio or batch of audio to be prepared. If provided as a string, it should correspond to the path or url of the audio file. model_id (`str`: The hub model id of the model to use for transcription. sampling_rate (`int`, *optional*): The sampling rate of the audio. Necessary if it is provided as `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`. Used to avoid silent errors when passing audio that is not in the expected sampling rate. 
format (`str`, `list[str]`, *optional*): The format of the audio, necessary if is provided as `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`. """ output_kwargs = self._merge_kwargs( VoxtralProcessorKwargs, **kwargs, ) text_kwargs = output_kwargs["text_kwargs"] audio_kwargs = output_kwargs["audio_kwargs"] common_kwargs = output_kwargs["common_kwargs"] is_str = isinstance(audio, str) is_list_of_str = all(isinstance(el, str) for el in audio) is_list_of_audio = not (is_str or is_list_of_str) if is_list_of_audio: if sampling_rate is None: logger.warning_once( f"You've provided audio without specifying the sampling rate. It will be assumed to be {audio_kwargs['sampling_rate']}, which can result in silent errors." ) elif sampling_rate != audio_kwargs["sampling_rate"]: raise ValueError( f"The sampling rate of the audio ({sampling_rate}) does not match the sampling rate of the processor ({audio_kwargs['sampling_rate']}). Please provide resampled the audio to the expected sampling rate." ) sampling_rate = audio_kwargs["sampling_rate"] return_dict = common_kwargs.pop("return_dict", False) tokenize = common_kwargs.pop("tokenize", False) # make sure to remove from text_kwargs and audio_kwargs for k in ("return_dict", "tokenize"): text_kwargs.pop(k, None) audio_kwargs.pop(k, None) return_tensors = common_kwargs.pop("return_tensors", None) if return_tensors != "pt": raise ValueError(f"{self.__class__.__name__} only supports `return_tensors='pt'`.") # validate audio input if is_str: audio = [load_audio_as(audio, return_format="buffer", force_mono=True, sampling_rate=sampling_rate)] elif is_list_of_str: audio = [ load_audio_as(el, return_format="buffer", force_mono=True, sampling_rate=sampling_rate) for el in audio ] else: audio = make_list_of_audio(audio) if len(audio) != len(format): raise ValueError( f"When passed as a list of audio, the length ({len(audio)}) must match the number of format ({len(format)})" ) audio_buffers = [] for array, f in zip(audio, format): # Create new BytesIO object and write audio data to it buffer = io.BytesIO() # Convert to mono if needed if array.ndim == 2: array = array.mean(axis=1) # Write to buffer with default format and sampling rate sf.write(buffer, array, samplerate=audio_kwargs["sampling_rate"], format=f) buffer.seek(0) audio_buffers.append(buffer) audio = audio_buffers # validate language input n_audio = len(audio) if isinstance(language, str): language = [language] * n_audio if len(language) != n_audio: raise ValueError( f"When passed as a list of languages, the length ({len(language)}) must match the number of audio ({n_audio})" ) input_ids = [] texts = [] audio_arrays = [] for audio_el, language_el in zip(audio, language): openai_transcription_request = { "model": model_id, "file": audio_el, "language": language_el, } transcription_request = TranscriptionRequest.from_openai(openai_transcription_request) tokenized_transcription_request = self.tokenizer.tokenizer.encode_transcription(transcription_request) input_ids.append(tokenized_transcription_request.tokens) texts.append(tokenized_transcription_request.text) audio_arrays.extend([el.audio_array for el in tokenized_transcription_request.audios]) if tokenize: if return_dict: # text are already tokenized but we need to pad etc encoding = self.tokenizer( input_ids, add_special_tokens=False, **text_kwargs, ) data = dict(encoding) # extract the input features max_source_positions = audio_kwargs.pop("max_source_positions") data["input_features"] = self._retreive_input_features( audio_arrays, 
max_source_positions, **audio_kwargs ) return BatchFeature(data=data, tensor_type=return_tensors) return texts __all__ = ["VoxtralProcessor"]
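# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original processing_voxtral.py): the
# padding/chunking bookkeeping behind `_retreive_input_features`. With the
# defaults from VoxtralProcessorKwargs above (16 kHz audio,
# pad_to_multiple_of=480000 samples, max_source_positions=3000 mel frames),
# audio is padded up to the next 30 s multiple and split into per-30 s chunks
# stacked on the batch axis. The 70 s clip length is an arbitrary example.
sampling_rate = 16_000
pad_to_multiple_of = 480_000   # 30 s of samples at 16 kHz
max_source_positions = 3_000   # mel frames kept per 30 s chunk

duration_s = 70
num_samples = duration_s * sampling_rate                                      # 1_120_000
padded_samples = -(-num_samples // pad_to_multiple_of) * pad_to_multiple_of   # ceil -> 1_440_000
num_chunks = padded_samples // pad_to_multiple_of

print(padded_samples // sampling_rate, num_chunks)  # 90 3 -> three (feature_size, 3000) chunks
# ---------------------------------------------------------------------------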
transformers/src/transformers/models/voxtral/processing_voxtral.py/0
{ "file_path": "transformers/src/transformers/models/voxtral/processing_voxtral.py", "repo_id": "transformers", "token_count": 8482 }
565
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Speech processor class for Wav2Vec2-BERT """ import warnings from typing import Optional, Union from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import AudioInput, PreTokenizedInput, TextInput from ..seamless_m4t.feature_extraction_seamless_m4t import SeamlessM4TFeatureExtractor from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer class Wav2Vec2BertProcessorKwargs(ProcessingKwargs, total=False): _defaults = {} class Wav2Vec2BertProcessor(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2Processor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2Processor.__call__`] and [`~Wav2Vec2Processor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ feature_extractor_class = "SeamlessM4TFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): try: return super().from_pretrained(pretrained_model_name_or_path, **kwargs) except OSError: warnings.warn( f"Loading a tokenizer inside {cls.__name__} from a config that does not" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", FutureWarning, ) feature_extractor = SeamlessM4TFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(feature_extractor=feature_extractor, tokenizer=tokenizer) def __call__( self, audio: AudioInput = None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None, images=None, videos=None, **kwargs: Unpack[Wav2Vec2BertProcessorKwargs], ): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequences(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information. 
Args: audio (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model when `audio` is not `None`. When only `text` is specified, returns the token attention mask. - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not `None`. - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio` is `None`. """ if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") output_kwargs = self._merge_kwargs( Wav2Vec2BertProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if audio is not None: inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"]) if text is not None: encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, input_features=None, labels=None, **kwargs): """ If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. If `labels` is not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the docstring of the above two methods for more information. """ if input_features is None and labels is None: raise ValueError("You need to specify either an `input_features` or `labels` input to pad.") if input_features is not None: input_features = self.feature_extractor.pad(input_features, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features @property def model_input_names(self): # The processor doesn't return text ids and the model seems to not need them feature_extractor_input_names = self.feature_extractor.model_input_names return feature_extractor_input_names + ["labels"] __all__ = ["Wav2Vec2BertProcessor"]
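# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original processing_wav2vec2_bert.py):
# building the processor from its two components and preparing one training
# example. The tiny throw-away character vocabulary and the random audio are
# assumptions made purely so the snippet runs without downloading a checkpoint.
import json
import os
import tempfile

import numpy as np
from transformers import SeamlessM4TFeatureExtractor, Wav2Vec2BertProcessor, Wav2Vec2CTCTokenizer

vocab = {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "h": 5, "e": 6, "l": 7, "o": 8}
with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    with open(vocab_file, "w") as f:
        json.dump(vocab, f)

    tokenizer = Wav2Vec2CTCTokenizer(vocab_file)
    feature_extractor = SeamlessM4TFeatureExtractor()
    processor = Wav2Vec2BertProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    audio = np.random.randn(16_000).astype(np.float32)  # 1 s of noise at 16 kHz
    batch = processor(audio=audio, text="hello", sampling_rate=16_000, return_tensors="pt")

    print(batch["input_features"].shape)  # (1, num_frames, feature_size)
    print(batch["labels"])                # character ids of "hello" from the tiny vocab
# ---------------------------------------------------------------------------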
transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py", "repo_id": "transformers", "token_count": 2952 }
566
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for XCLIP """ import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class XCLIPProcessor(ProcessorMixin): r""" Constructs an X-CLIP processor which wraps a VideoMAE image processor and a CLIP tokenizer into a single processor. [`XCLIPProcessor`] offers all the functionalities of [`VideoMAEImageProcessor`] and [`CLIPTokenizerFast`]. See the [`~XCLIPProcessor.__call__`] and [`~XCLIPProcessor.decode`] for more information. Args: image_processor ([`VideoMAEImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "VideoMAEImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", FutureWarning, ) feature_extractor = kwargs.pop("feature_extractor") image_processor = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") super().__init__(image_processor, tokenizer) self.current_processor = self.image_processor def __call__(self, text=None, videos=None, return_tensors=None, **kwargs): """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to VideoMAEImageProcessor's [`~VideoMAEImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, `list[list[PIL.Image.Image]]`, `list[list[np.ndarray]]`,: `list[list[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of channels. 
return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`. """ if text is None and videos is None: raise ValueError("You have to specify either text or videos. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if videos is not None: image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs) if text is not None and videos is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor __all__ = ["XCLIPProcessor"]
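# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original processing_x_clip.py): joint
# text + video preprocessing, mirroring the example used in the X-CLIP docs
# with the "microsoft/xclip-base-patch32" checkpoint. Eight random RGB frames
# stand in for a real 8-frame clip.
import numpy as np
from transformers import XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")

video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]

inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"],
    videos=video,
    return_tensors="pt",
    padding=True,
)
print(inputs["input_ids"].shape)     # (3, sequence_length)
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)
# ---------------------------------------------------------------------------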
transformers/src/transformers/models/x_clip/processing_x_clip.py/0
{ "file_path": "transformers/src/transformers/models/x_clip/processing_x_clip.py", "repo_id": "transformers", "token_count": 2393 }
567
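The record above defines the X-CLIP processor; a minimal, hedged usage sketch follows. The checkpoint name `microsoft/xclip-base-patch32`, the 8-frame dummy clip, and the prompt text are illustrative assumptions rather than anything specified in the file itself.

# Usage sketch for XCLIPProcessor (all concrete values below are assumptions).
import numpy as np

from transformers import XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")

# One video = a list of frames; here, 8 random RGB frames of shape (H, W, C).
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]

inputs = processor(text="a person playing guitar", videos=[video], return_tensors="pt")
# `inputs` holds `input_ids` and `attention_mask` from the tokenizer plus
# `pixel_values` produced by the VideoMAE image processor.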
# coding=utf-8 # Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XLM model. """ from __future__ import annotations import itertools import warnings from dataclasses import dataclass import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_xlm import XLMConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "FacebookAI/xlm-mlm-en-2048" _CONFIG_FOR_DOC = "XLMConfig" def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = tf.constant(np.sin(position_enc[:, 0::2])) out[:, 1::2] = tf.constant(np.cos(position_enc[:, 1::2])) def get_masks(slen, lengths, causal, padding_mask=None): """ Generate hidden states mask, and optionally an attention mask. 
""" bs = shape_list(lengths)[0] if padding_mask is not None: mask = padding_mask else: # assert lengths.max().item() <= slen alen = tf.range(slen, dtype=lengths.dtype) mask = alen < tf.expand_dims(lengths, axis=1) # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = tf.less_equal( tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1)) ) else: attn_mask = mask # sanity check # assert shape_list(mask) == [bs, slen] tf.debugging.assert_equal(shape_list(mask), [bs, slen]) if causal: tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) return mask, attn_mask class TFXLMMultiHeadAttention(keras.layers.Layer): NEW_ID = itertools.count() def __init__(self, n_heads, dim, config, **kwargs): super().__init__(**kwargs) self.layer_id = next(TFXLMMultiHeadAttention.NEW_ID) self.dim = dim self.n_heads = n_heads self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin") self.k_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin") self.v_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin") self.out_lin = keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin") self.dropout = keras.layers.Dropout(config.attention_dropout) self.pruned_heads = set() self.dim = dim def prune_heads(self, heads): raise NotImplementedError def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False): """ Self-attention (if kv is None) or attention over source sentence (provided by kv). """ # Input is (bs, qlen, dim) # Mask is (bs, klen) (non-causal) or (bs, klen, klen) bs, qlen, dim = shape_list(input) if kv is None: klen = qlen if cache is None else cache["slen"] + qlen else: klen = shape_list(kv)[1] # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured' dim_per_head = self.dim // self.n_heads mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen) def shape(x): """projection""" return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3)) def unshape(x): """compute context""" return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head)) q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head) if kv is None: k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head) elif cache is None or self.layer_id not in cache: k = v = kv k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head) v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head) if cache is not None: if self.layer_id in cache: if kv is None: k_, v_ = cache[self.layer_id] k = tf.concat([k_, k], axis=2) # (bs, n_heads, klen, dim_per_head) v = tf.concat([v_, v], axis=2) # (bs, n_heads, klen, dim_per_head) else: k, v = cache[self.layer_id] cache[self.layer_id] = (k, v) f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype) q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head)) # (bs, n_heads, qlen, dim_per_head) k = tf.cast(k, dtype=q.dtype) scores = tf.matmul(q, k, transpose_b=True) # (bs, n_heads, qlen, klen) mask = tf.reshape(mask, mask_reshape) # (bs, n_heads, qlen, klen) # scores.masked_fill_(mask, -float('inf')) # (bs, n_heads, qlen, klen) mask = tf.cast(mask, dtype=scores.dtype) scores = scores 
- 1e30 * (1.0 - mask) weights = stable_softmax(scores, axis=-1) # (bs, n_heads, qlen, klen) weights = self.dropout(weights, training=training) # (bs, n_heads, qlen, klen) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = tf.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head) context = unshape(context) # (bs, qlen, dim) outputs = (self.out_lin(context),) if output_attentions: outputs = outputs + (weights,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "q_lin", None) is not None: with tf.name_scope(self.q_lin.name): self.q_lin.build([None, None, self.dim]) if getattr(self, "k_lin", None) is not None: with tf.name_scope(self.k_lin.name): self.k_lin.build([None, None, self.dim]) if getattr(self, "v_lin", None) is not None: with tf.name_scope(self.v_lin.name): self.v_lin.build([None, None, self.dim]) if getattr(self, "out_lin", None) is not None: with tf.name_scope(self.out_lin.name): self.out_lin.build([None, None, self.dim]) class TFXLMTransformerFFN(keras.layers.Layer): def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs): super().__init__(**kwargs) self.lin1 = keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1") self.lin2 = keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2") self.act = get_tf_activation("gelu") if config.gelu_activation else get_tf_activation("relu") self.dropout = keras.layers.Dropout(config.dropout) self.in_dim = in_dim self.dim_hidden = dim_hidden def call(self, input, training=False): x = self.lin1(input) x = self.act(x) x = self.lin2(x) x = self.dropout(x, training=training) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lin1", None) is not None: with tf.name_scope(self.lin1.name): self.lin1.build([None, None, self.in_dim]) if getattr(self, "lin2", None) is not None: with tf.name_scope(self.lin2.name): self.lin2.build([None, None, self.dim_hidden]) @keras_serializable class TFXLMMainLayer(keras.layers.Layer): config_class = XLMConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.use_return_dict # encoder / decoder, output layer self.is_encoder = config.is_encoder self.is_decoder = not config.is_encoder if self.is_decoder: raise NotImplementedError("Currently XLM can only be used as an encoder") # self.with_output = with_output self.causal = config.causal # dictionary / languages self.n_langs = config.n_langs self.use_lang_emb = config.use_lang_emb self.n_words = config.n_words self.eos_index = config.eos_index self.pad_index = config.pad_index # self.dico = dico # self.id2lang = config.id2lang # self.lang2id = config.lang2id # assert len(self.dico) == self.n_words # assert len(self.id2lang) == len(self.lang2id) == self.n_langs # model parameters self.dim = config.emb_dim # 512 by default self.hidden_dim = self.dim * 4 # 2048 by default self.n_heads = config.n_heads # 8 by default self.n_layers = config.n_layers self.max_position_embeddings = config.max_position_embeddings self.embed_init_std = config.embed_init_std if self.dim % self.n_heads != 0: raise ValueError("transformer dim must be a multiple of n_heads") # embeddings self.dropout = keras.layers.Dropout(config.dropout) self.attention_dropout = 
keras.layers.Dropout(config.attention_dropout) if config.sinusoidal_embeddings: raise NotImplementedError # create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight) self.embeddings = TFSharedEmbeddings( self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings" ) # padding_idx=self.pad_index) self.layer_norm_emb = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb") # transformer layers self.attentions = [] self.layer_norm1 = [] self.ffns = [] self.layer_norm2 = [] # if self.is_decoder: # self.layer_norm15 = [] # self.encoder_attn = [] for i in range(self.n_layers): self.attentions.append( TFXLMMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}") ) self.layer_norm1.append( keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}") ) # if self.is_decoder: # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps)) # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout)) self.ffns.append( TFXLMTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}") ) self.layer_norm2.append( keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}") ) if hasattr(config, "pruned_heads"): pruned_heads = config.pruned_heads.copy().items() config.pruned_heads = {} for layer, heads in pruned_heads: if self.attentions[int(layer)].n_heads == config.n_heads: self.prune_heads({int(layer): list(map(int, heads))}) def build(self, input_shape=None): if self.built: return self.built = True with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.dim], initializer=get_initializer(self.embed_init_std), ) if self.n_langs > 1 and self.use_lang_emb: with tf.name_scope("lang_embeddings"): self.lang_embeddings = self.add_weight( name="embeddings", shape=[self.n_langs, self.dim], initializer=get_initializer(self.embed_init_std), ) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "layer_norm_emb", None) is not None: with tf.name_scope(self.layer_norm_emb.name): self.layer_norm_emb.build([None, None, self.dim]) for layer in self.attentions: with tf.name_scope(layer.name): layer.build(None) for layer in self.layer_norm1: with tf.name_scope(layer.name): layer.build([None, None, self.dim]) for layer in self.ffns: with tf.name_scope(layer.name): layer.build(None) for layer in self.layer_norm2: with tf.name_scope(layer.name): layer.build([None, None, self.dim]) def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs def call( self, input_ids=None, attention_mask=None, langs=None, token_type_ids=None, position_ids=None, lengths=None, cache=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: # removed: src_enc=None, src_len=None if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: bs, slen = shape_list(input_ids) elif inputs_embeds is not None: bs, slen = shape_list(inputs_embeds)[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if lengths is None: if input_ids is not None: lengths = tf.reduce_sum( tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1 ) else: lengths = tf.convert_to_tensor([slen] * bs) # mask = input_ids != self.pad_index # check inputs # assert shape_list(lengths)[0] == bs ( tf.debugging.assert_equal(shape_list(lengths)[0], bs), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched", ) # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) # if src_enc is not None: # assert self.is_decoder # assert src_enc.size(0) == bs # generate masks mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask) # if self.is_decoder and src_enc is not None: # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None] # position_ids if position_ids is None: position_ids = tf.expand_dims(tf.range(slen), axis=0) position_ids = tf.tile(position_ids, (bs, 1)) # assert shape_list(position_ids) == [bs, slen] # (slen, bs) ( tf.debugging.assert_equal(shape_list(position_ids), [bs, slen]), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched", ) # position_ids = position_ids.transpose(0, 1) # langs if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) ( tf.debugging.assert_equal(shape_list(langs), [bs, slen]), f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched", ) # langs = langs.transpose(0, 1) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.n_layers # do not recompute cached elements if cache is not None and input_ids is not None: _slen = slen - cache["slen"] input_ids = input_ids[:, -_slen:] position_ids = position_ids[:, -_slen:] if langs is not None: langs = langs[:, -_slen:] mask = mask[:, -_slen:] attn_mask = attn_mask[:, -_slen:] # embeddings if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) if langs is not None and self.use_lang_emb and self.n_langs > 1: tensor = tensor + tf.gather(self.lang_embeddings, langs) if token_type_ids is not None: tensor = tensor + self.embeddings(token_type_ids) tensor = 
self.layer_norm_emb(tensor)
        tensor = self.dropout(tensor, training=training)
        mask = tf.cast(mask, dtype=tensor.dtype)
        tensor = tensor * tf.expand_dims(mask, axis=-1)

        # transformer layers
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None
        for i in range(self.n_layers):
            if output_hidden_states:
                hidden_states = hidden_states + (tensor,)

            # self attention
            attn_outputs = self.attentions[i](
                tensor,
                attn_mask,
                None,
                cache,
                head_mask[i],
                output_attentions,
                training=training,
            )
            attn = attn_outputs[0]

            if output_attentions:
                attentions = attentions + (attn_outputs[1],)

            attn = self.dropout(attn, training=training)
            tensor = tensor + attn
            tensor = self.layer_norm1[i](tensor)

            # encoder attention (for decoder only)
            # if self.is_decoder and src_enc is not None:
            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
            #     attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
            #     tensor = tensor + attn
            #     tensor = self.layer_norm15[i](tensor)

            # FFN
            tensor = tensor + self.ffns[i](tensor)
            tensor = self.layer_norm2[i](tensor)
            tensor = tensor * tf.expand_dims(mask, axis=-1)

        # Add last hidden state
        if output_hidden_states:
            hidden_states = hidden_states + (tensor,)

        # update cache length
        if cache is not None:
            cache["slen"] += shape_list(tensor)[1]

        # move back sequence length to dimension 0
        # tensor = tensor.transpose(0, 1)

        if not return_dict:
            return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)

        return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)


class TFXLMPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = XLMConfig
    base_model_prefix = "transformer"

    @property
    def dummy_inputs(self):
        # Sometimes XLM has language embeddings so don't forget to build them as well if needed
        inputs_list = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32)
        attns_list = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
        if self.config.use_lang_emb and self.config.n_langs > 1:
            return {
                "input_ids": inputs_list,
                "attention_mask": attns_list,
                "langs": tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32),
            }
        else:
            return {"input_ids": inputs_list, "attention_mask": attns_list}


# Remove when XLMWithLMHead computes loss like other LM models
@dataclass
class TFXLMWithLMHeadModelOutput(ModelOutput):
    """
    Base class for [`TFXLMWithLMHeadModel`] outputs.

    Args:
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
""" logits: tf.Tensor | None = None hidden_states: tuple[tf.Tensor, ...] | None = None attentions: tuple[tf.Tensor, ...] | None = None XLM_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`XLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLM_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) langs (`tf.Tensor` or `Numpy array` of shape `({0})`, *optional*): A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are languages ids which can be obtained from the language names by using two conversion mappings provided in the configuration of the model (only provided for multilingual models). 
More precisely, the *language name to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the *language id to language name* mapping is in `model.config.id2lang` (dictionary int to string). See usage examples detailed in the [multilingual documentation](../multilingual). token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*): Length of each sentence that can be used to avoid performing attention on padding token indices. You can also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in `[0, ..., input_ids.size(-1)]`. cache (`dict[str, tf.Tensor]`, *optional*): Dictionary string to `tf.Tensor` that contains precomputed hidden states (key and values in the attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential decoding. The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states. head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare XLM Model transformer outputting raw hidden-states without any specific head on top.", XLM_START_DOCSTRING, ) class TFXLMModel(TFXLMPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, langs: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, lengths: tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutput | tuple[tf.Tensor]: outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) class TFXLMPredLayer(keras.layers.Layer): """ Prediction layer (cross_entropy or adaptive_softmax). """ def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index if config.asm is False: self.input_embeddings = input_embeddings else: raise NotImplementedError # self.proj = nn.AdaptiveLogSoftmaxWithLoss( # in_features=dim, # n_classes=config.n_words, # cutoffs=config.asm_cutoffs, # div_value=config.asm_div_value, # head_bias=True, # default is False # ) def build(self, input_shape): # The output weights are the same as the input embeddings, but there is an output-only bias for each token. self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @add_start_docstrings( """ The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", XLM_START_DOCSTRING, ) class TFXLMWithLMHeadModel(TFXLMPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.pred_layer = TFXLMPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj") # XLM does not have past caching features self.supports_xla_generation = False def get_lm_head(self): return self.pred_layer def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.pred_layer.name def prepare_inputs_for_generation(self, inputs, **kwargs): mask_token_id = self.config.mask_token_id lang_id = self.config.lang_id effective_batch_size = inputs.shape[0] mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id inputs = tf.concat([inputs, mask_token], axis=1) if lang_id is not None: langs = tf.ones_like(inputs) * lang_id else: langs = None return {"input_ids": inputs, "langs": langs} @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLMWithLMHeadModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, langs: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, lengths: np.ndarray | tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFXLMWithLMHeadModelOutput | tuple[tf.Tensor]: transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] outputs = self.pred_layer(output) if not return_dict: return (outputs,) + transformer_outputs[1:] return TFXLMWithLMHeadModelOutput( logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "pred_layer", None) is not None: with tf.name_scope(self.pred_layer.name): self.pred_layer.build(None) @add_start_docstrings( """ XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", XLM_START_DOCSTRING, ) class TFXLMForSequenceClassification(TFXLMPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLMMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, langs: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, lengths: np.ndarray | tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool = False, ) -> TFSequenceClassifierOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.sequence_summary(output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "sequence_summary", None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) @add_start_docstrings( """ XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", XLM_START_DOCSTRING, ) class TFXLMForMultipleChoice(TFXLMPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary") self.logits_proj = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) self.config = config @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ # Sometimes XLM has language embeddings so don't forget to build them as well if needed if self.config.use_lang_emb and self.config.n_langs > 1: return { "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), } else: return { "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), } @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, langs: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, lengths: np.ndarray | tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool = False, ) -> TFMultipleChoiceModelOutput | tuple[tf.Tensor]: if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) if lengths is not None: logger.warning( "The `lengths` parameter cannot be used with the XLM multiple choice models. 
Please use the " "attention mask instead.", ) lengths = None transformer_outputs = self.transformer( flat_input_ids, flat_attention_mask, flat_langs, flat_token_type_ids, flat_position_ids, lengths, cache, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "sequence_summary", None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) if getattr(self, "logits_proj", None) is not None: with tf.name_scope(self.logits_proj.name): self.logits_proj.build([None, None, self.config.num_labels]) @add_start_docstrings( """ XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLM_START_DOCSTRING, ) class TFXLMForTokenClassification(TFXLMPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLMMainLayer(config, name="transformer") self.dropout = keras.layers.Dropout(config.dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, langs: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, lengths: np.ndarray | tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool = False, ) -> TFTokenClassifierOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = transformer_outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLM_START_DOCSTRING, ) class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLMMainLayer(config, name="transformer") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, langs: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, lengths: np.ndarray | tf.Tensor | None = None, cache: dict[str, tf.Tensor] | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: bool = False, ) -> TFQuestionAnsweringModelOutput | tuple[tf.Tensor]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" transformer_outputs = self.transformer( input_ids=input_ids, attention_mask=attention_mask, langs=langs, token_type_ids=token_type_ids, position_ids=position_ids, lengths=lengths, cache=cache, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transformer", None) is not None: with tf.name_scope(self.transformer.name): self.transformer.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) __all__ = [ "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ]
transformers/src/transformers/models/xlm/modeling_tf_xlm.py/0
{ "file_path": "transformers/src/transformers/models/xlm/modeling_tf_xlm.py", "repo_id": "transformers", "token_count": 25019 }
568
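A short, hedged sketch of running the bare TF 2.0 XLM encoder defined in the record above; the checkpoint name is the one already referenced by `_CHECKPOINT_FOR_DOC` in that file, and TensorFlow is assumed to be installed.

# Usage sketch for TFXLMModel (checkpoint name taken from _CHECKPOINT_FOR_DOC above).
from transformers import AutoTokenizer, TFXLMModel

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
model = TFXLMModel.from_pretrained("FacebookAI/xlm-mlm-en-2048")

inputs = tokenizer("Hello, my dog is cute.", return_tensors="tf")
outputs = model(**inputs)

# The encoder returns a TFBaseModelOutput; last_hidden_state has shape
# (batch_size, sequence_length, config.emb_dim).
print(outputs.last_hidden_state.shape)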
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert XLNet checkpoint."""

import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned.",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
transformers/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1468 }
569
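The conversion script above is meant to be run from the command line; a hedged invocation sketch (kept in Python via `subprocess`) is shown below. The script path and the checkpoint/config/output paths are placeholders, while the flag names come from the argument parser defined in the file.

# Invocation sketch for the XLNet TF -> PyTorch conversion script; every path
# here is a placeholder that must point at real files.
import subprocess

subprocess.run(
    [
        "python",
        "src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py",
        "--tf_checkpoint_path", "/path/to/xlnet_model.ckpt",
        "--xlnet_config_file", "/path/to/xlnet_config.json",
        "--pytorch_dump_folder_path", "/path/to/pytorch_dump",
        "--finetuning_task", "sts-b",
    ],
    check=True,
)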
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ZoeDepth model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING logger = logging.get_logger(__name__) ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "Intel/zoedepth-nyu": "https://huggingface.co/Intel/zoedepth-nyu/resolve/main/config.json", } class ZoeDepthConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ZoeDepthForDepthEstimation`]. It is used to instantiate an ZoeDepth model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ZoeDepth [Intel/zoedepth-nyu](https://huggingface.co/Intel/zoedepth-nyu) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: backbone_config (`Union[dict[str, Any], PretrainedConfig]`, *optional*, defaults to `BeitConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch normalization layers. readout_type (`str`, *optional*, defaults to `"project"`): The readout type to use when processing the readout token (CLS token) of the intermediate hidden states of the ViT backbone. Can be one of [`"ignore"`, `"add"`, `"project"`]. - "ignore" simply ignores the CLS token. - "add" passes the information from the CLS token to all other tokens by adding the representations. - "project" passes information to the other tokens by concatenating the readout to all other tokens before projecting the representation to the original feature dimension D using a linear layer followed by a GELU non-linearity. 
reassemble_factors (`list[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`): The up/downsampling factors of the reassemble layers. neck_hidden_sizes (`list[str]`, *optional*, defaults to `[96, 192, 384, 768]`): The hidden sizes to project to for the feature maps of the backbone. fusion_hidden_size (`int`, *optional*, defaults to 256): The number of channels before fusion. head_in_index (`int`, *optional*, defaults to -1): The index of the features to use in the heads. use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`): Whether to use batch normalization in the pre-activate residual units of the fusion blocks. use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`): Whether to use bias in the pre-activate residual units of the fusion blocks. num_relative_features (`int`, *optional*, defaults to 32): The number of features to use in the relative depth estimation head. add_projection (`bool`, *optional*, defaults to `False`): Whether to add a projection layer before the depth estimation head. bottleneck_features (`int`, *optional*, defaults to 256): The number of features in the bottleneck layer. num_attractors (`list[int], *optional*, defaults to `[16, 8, 4, 1]`): The number of attractors to use in each stage. bin_embedding_dim (`int`, *optional*, defaults to 128): The dimension of the bin embeddings. attractor_alpha (`int`, *optional*, defaults to 1000): The alpha value to use in the attractor. attractor_gamma (`int`, *optional*, defaults to 2): The gamma value to use in the attractor. attractor_kind (`str`, *optional*, defaults to `"mean"`): The kind of attractor to use. Can be one of [`"mean"`, `"sum"`]. min_temp (`float`, *optional*, defaults to 0.0212): The minimum temperature value to consider. max_temp (`float`, *optional*, defaults to 50.0): The maximum temperature value to consider. bin_centers_type (`str`, *optional*, defaults to `"softplus"`): Activation type used for bin centers. Can be "normed" or "softplus". For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. bin_configurations (`list[dict]`, *optional*, defaults to `[{'n_bins': 64, 'min_depth': 0.001, 'max_depth': 10.0}]`): Configuration for each of the bin heads. Each configuration should consist of the following keys: - name (`str`): The name of the bin head - only required in case of multiple bin configurations. - `n_bins` (`int`): The number of bins to use. - `min_depth` (`float`): The minimum depth value to consider. - `max_depth` (`float`): The maximum depth value to consider. In case only a single configuration is passed, the model will use a single head with the specified configuration. In case multiple configurations are passed, the model will use multiple heads with the specified configurations. num_patch_transformer_layers (`int`, *optional*): The number of transformer layers to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_hidden_size (`int`, *optional*): The hidden size to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_intermediate_size (`int`, *optional*): The intermediate size to use in the patch transformer. Only used in case of multiple bin configurations. patch_transformer_num_attention_heads (`int`, *optional*): The number of attention heads to use in the patch transformer. Only used in case of multiple bin configurations. 
Example: ```python >>> from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation >>> # Initializing a ZoeDepth zoedepth-large style configuration >>> configuration = ZoeDepthConfig() >>> # Initializing a model from the zoedepth-large style configuration >>> model = ZoeDepthForDepthEstimation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "zoedepth" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, backbone_kwargs=None, hidden_act="gelu", initializer_range=0.02, batch_norm_eps=1e-05, readout_type="project", reassemble_factors=[4, 2, 1, 0.5], neck_hidden_sizes=[96, 192, 384, 768], fusion_hidden_size=256, head_in_index=-1, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=None, num_relative_features=32, add_projection=False, bottleneck_features=256, num_attractors=[16, 8, 4, 1], bin_embedding_dim=128, attractor_alpha=1000, attractor_gamma=2, attractor_kind="mean", min_temp=0.0212, max_temp=50.0, bin_centers_type="softplus", bin_configurations=[{"n_bins": 64, "min_depth": 0.001, "max_depth": 10.0}], num_patch_transformer_layers=None, patch_transformer_hidden_size=None, patch_transformer_intermediate_size=None, patch_transformer_num_attention_heads=None, **kwargs, ): super().__init__(**kwargs) if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") if attractor_kind not in ["mean", "sum"]: raise ValueError("Attractor_kind must be one of ['mean', 'sum']") if use_pretrained_backbone: raise ValueError("Pretrained backbones are not supported yet.") if backbone_config is not None and backbone is not None: raise ValueError("You can't specify both `backbone` and `backbone_config`.") if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. 
Initializing the config with the default `BEiT` backbone.") backbone_config = CONFIG_MAPPING["beit"]( image_size=384, num_hidden_layers=24, hidden_size=1024, intermediate_size=4096, num_attention_heads=16, use_relative_position_bias=True, reshape_hidden_states=False, out_features=["stage6", "stage12", "stage18", "stage24"], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") self.backbone_config = backbone_config self.backbone = backbone self.hidden_act = hidden_act self.use_pretrained_backbone = use_pretrained_backbone self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.readout_type = readout_type self.reassemble_factors = reassemble_factors self.neck_hidden_sizes = neck_hidden_sizes self.fusion_hidden_size = fusion_hidden_size self.head_in_index = head_in_index self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual self.use_bias_in_fusion_residual = use_bias_in_fusion_residual self.num_relative_features = num_relative_features self.add_projection = add_projection self.bottleneck_features = bottleneck_features self.num_attractors = num_attractors self.bin_embedding_dim = bin_embedding_dim self.attractor_alpha = attractor_alpha self.attractor_gamma = attractor_gamma self.attractor_kind = attractor_kind self.min_temp = min_temp self.max_temp = max_temp self.bin_centers_type = bin_centers_type self.bin_configurations = bin_configurations self.num_patch_transformer_layers = num_patch_transformer_layers self.patch_transformer_hidden_size = patch_transformer_hidden_size self.patch_transformer_intermediate_size = patch_transformer_intermediate_size self.patch_transformer_num_attention_heads = patch_transformer_num_attention_heads @property def sub_configs(self): return ( {"backbone_config": type(self.backbone_config)} if getattr(self, "backbone_config", None) is not None else {} ) __all__ = ["ZOEDEPTH_PRETRAINED_CONFIG_ARCHIVE_MAP", "ZoeDepthConfig"]
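As a hedged illustration of the configuration class above (not part of the file itself), the sketch below builds a `ZoeDepthConfig` with two bin heads, which is the case where the patch-transformer arguments become relevant. The specific hyper-parameter values and the NYU/KITTI-style depth ranges are assumptions chosen for the example, not values mandated by the class.

```python
from transformers import ZoeDepthConfig, ZoeDepthForDepthEstimation

# Two bin heads -> each entry needs a "name", and the patch transformer
# hyper-parameters must be provided (they are only used in the multi-head case).
config = ZoeDepthConfig(
    bin_centers_type="softplus",
    bin_configurations=[
        {"name": "nyu", "n_bins": 64, "min_depth": 1e-3, "max_depth": 10.0},
        {"name": "kitti", "n_bins": 64, "min_depth": 1e-3, "max_depth": 80.0},
    ],
    num_patch_transformer_layers=4,
    patch_transformer_hidden_size=128,
    patch_transformer_intermediate_size=1024,
    patch_transformer_num_attention_heads=4,
)

# backbone_config is None here, so the default BEiT-large backbone shown in
# __init__ above is instantiated with randomly initialized weights.
model = ZoeDepthForDepthEstimation(config)
print(model.config.bin_configurations)
```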
transformers/src/transformers/models/zoedepth/configuration_zoedepth.py/0
{ "file_path": "transformers/src/transformers/models/zoedepth/configuration_zoedepth.py", "repo_id": "transformers", "token_count": 4944 }
570
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from typing import TYPE_CHECKING, Any, Optional, Union import numpy as np import requests from ..generation import GenerationConfig from ..tokenization_utils import PreTrainedTokenizer from ..utils import is_torch_available, is_torchaudio_available, is_torchcodec_available, logging from .audio_utils import ffmpeg_read from .base import ChunkPipeline if TYPE_CHECKING: from pyctcdecode import BeamSearchDecoderCTC from ..feature_extraction_sequence_utils import SequenceFeatureExtractor from ..modeling_utils import PreTrainedModel logger = logging.get_logger(__name__) if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES def rescale_stride(stride, ratio): """ Rescales the stride values from audio space to tokens/logits space. (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance. """ # Shape is [B, SEQ] for tokens # [B, SEQ, V] for logits new_strides = [] for input_n, left, right in stride: token_n = int(round(input_n * ratio)) left = int(round(left / input_n * token_n)) right = int(round(right / input_n * token_n)) new_stride = (token_n, left, right) new_strides.append(new_stride) return new_strides def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right, dtype=None): inputs_len = inputs.shape[0] step = chunk_len - stride_left - stride_right for chunk_start_idx in range(0, inputs_len, step): chunk_end_idx = chunk_start_idx + chunk_len chunk = inputs[chunk_start_idx:chunk_end_idx] processed = feature_extractor( chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt", return_attention_mask=True, ) if dtype is not None: processed = processed.to(dtype=dtype) _stride_left = 0 if chunk_start_idx == 0 else stride_left is_last = chunk_end_idx >= inputs_len _stride_right = 0 if is_last else stride_right chunk_len = chunk.shape[0] stride = (chunk_len, _stride_left, _stride_right) if chunk.shape[0] > _stride_left: yield {"is_last": is_last, "stride": stride, **processed} if is_last: break def _find_longest_common_sequence(sequences, tokenizer): # TODO Use a faster algorithm this can probably be done in O(n) # using suffix array. # It might be tedious to do because of fault tolerance. # We actually have a really good property which is that the total sequence # MUST be those subsequences in order. # Also the algorithm should be more tolerant to errors. 
sequence = [tok_id for tok_id in sequences[0][0].tolist() if tok_id not in tokenizer.all_special_ids] for new_seq in sequences[1:]: new_sequence = [tok_id for tok_id in new_seq[0].tolist() if tok_id not in tokenizer.all_special_ids] index = 0 max_ = 0.0 for i in range(1, len(new_sequence) + 1): # epsilon to favor long perfect matches eps = i / 10000.0 matches = np.sum(np.array(sequence[-i:]) == np.array(new_sequence[:i])) matching = matches / i + eps if matches > 1 and matching > max_: index = i max_ = matching sequence.extend(new_sequence[index:]) return np.array(sequence) class AutomaticSpeechRecognitionPipeline(ChunkPipeline): """ Pipeline that aims at extracting spoken text contained within some audio. The input can be either a raw waveform or an audio file. In the case of an audio file, ffmpeg should be installed to support multiple audio formats. Unless the model you're using explicitly sets these generation parameters in its configuration files (`generation_config.json`), the following default values will be used: - max_new_tokens: 256 - num_beams: 5 Example: ```python >>> from transformers import pipeline >>> transcriber = pipeline(model="openai/whisper-base") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac") {'text': ' He hoped there would be stew for dinner, turnips and carrots and bruised potatoes and fat mutton pieces to be ladled out in thick, peppered flour-fatten sauce.'} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) Arguments: model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow. feature_extractor ([`SequenceFeatureExtractor`]): The feature extractor that will be used by the pipeline to encode waveform for the model. tokenizer ([`PreTrainedTokenizer`]): The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from [`PreTrainedTokenizer`]. decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*): [PyCTCDecode's BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180) can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information. chunk_length_s (`float`, *optional*, defaults to 0): The input length of each chunk. If `chunk_length_s = 0` then chunking is disabled (default). <Tip> For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking blog post](https://huggingface.co/blog/asr-chunking). </Tip> stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`): The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables the model to *see* more context and infer letters better than without this context but the pipeline discards the stride bits at the end to make the final reconstitution as perfect as possible. <Tip> For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking blog post](https://huggingface.co/blog/asr-chunking). </Tip> framework (`str`, *optional*): The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be installed. If no framework is specified, will default to the one currently installed. 
If no framework is specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is provided. device (Union[`int`, `torch.device`], *optional*): Device ordinal for CPU/GPU supports. Setting this to `None` will leverage CPU, a positive will run the model on the associated CUDA device id. """ _pipeline_calls_generate = True _load_processor = False _load_image_processor = False _load_feature_extractor = True _load_tokenizer = True # Make sure the docstring is updated when the default generation config is changed _default_generation_config = GenerationConfig( max_new_tokens=256, num_beams=5, # follows openai's whisper implementation ) def __init__( self, model: "PreTrainedModel", feature_extractor: Union["SequenceFeatureExtractor", str] = None, tokenizer: Optional[PreTrainedTokenizer] = None, decoder: Optional[Union["BeamSearchDecoderCTC", str]] = None, device: Union[int, "torch.device"] = None, **kwargs, ): # set the model type so we can check we have the right pre- and post-processing parameters if model.config.model_type == "whisper": self.type = "seq2seq_whisper" elif model.__class__.__name__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.values(): self.type = "seq2seq" elif ( feature_extractor._processor_class and feature_extractor._processor_class.endswith("WithLM") and decoder is not None ): self.decoder = decoder self.type = "ctc_with_lm" else: self.type = "ctc" super().__init__(model, tokenizer, feature_extractor, device=device, **kwargs) def __call__(self, inputs: Union[np.ndarray, bytes, str, dict], **kwargs: Any) -> list[dict[str, Any]]: """ Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more information. Args: inputs (`np.ndarray` or `bytes` or `str` or `dict`): The inputs is either : - `str` that is either the filename of a local audio file, or a public URL address to download the audio file. The file will be read at the correct sampling rate to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system. - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the same way. - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`) Raw audio at the correct sampling rate (no further check will be done) - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw": np.array}` with optionally a `"stride": (left: int, right: int)` than can ask the pipeline to treat the first `left` samples and last `right` samples to be ignored in decoding (but used at inference to provide more context to the model). Only use `stride` with CTC models. return_timestamps (*optional*, `str` or `bool`): Only available for pure CTC models (Wav2Vec2, HuBERT, etc) and the Whisper model. Not available for other sequence-to-sequence models. For CTC models, timestamps can take one of two formats: - `"char"`: the pipeline will return timestamps along the text for every character in the text. For instance, if you get `[{"text": "h", "timestamp": (0.5, 0.6)}, {"text": "i", "timestamp": (0.7, 0.9)}]`, then it means the model predicts that the letter "h" was spoken after `0.5` and before `0.6` seconds. - `"word"`: the pipeline will return timestamps along the text for every word in the text. 
For instance, if you get `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`, then it means the model predicts that the word "hi" was spoken after `0.5` and before `0.9` seconds. For the Whisper model, timestamps can take one of two formats: - `"word"`: same as above for word-level CTC timestamps. Word-level timestamps are predicted through the *dynamic-time warping (DTW)* algorithm, an approximation to word-level timestamps by inspecting the cross-attention weights. - `True`: the pipeline will return timestamps along the text for *segments* of words in the text. For instance, if you get `[{"text": " Hi there!", "timestamp": (0.5, 1.5)}]`, then it means the model predicts that the segment "Hi there!" was spoken after `0.5` and before `1.5` seconds. Note that a segment of text refers to a sequence of one or more words, rather than individual words as with word-level timestamps. generate_kwargs (`dict`, *optional*): The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a complete overview of generate, check the [following guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). Return: `Dict`: A dictionary with the following keys: - **text** (`str`): The recognized text. - **chunks** (*optional(, `list[Dict]`) When using `return_timestamps`, the `chunks` will become a list containing all the various text chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamp": (0.5, 0.9)}, {"text": "there", "timestamp": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing `"".join(chunk["text"] for chunk in output["chunks"])`. """ return super().__call__(inputs, **kwargs) def _sanitize_parameters( self, chunk_length_s=None, stride_length_s=None, ignore_warning=None, decoder_kwargs=None, return_timestamps=None, return_language=None, generate_kwargs=None, ): # No parameters on this pipeline right now preprocess_params = {} if chunk_length_s is not None: if self.type in ["seq2seq", "seq2seq_whisper"] and not ignore_warning: type_warning = ( "Using `chunk_length_s` is very experimental with seq2seq models. The results will not necessarily" " be entirely accurate and will have caveats. More information:" " https://github.com/huggingface/transformers/pull/20104. Ignore this warning with pipeline(...," " ignore_warning=True)." ) if self.type == "seq2seq_whisper": type_warning += ( " To use Whisper for long-form transcription, use rather the model's `generate` method directly " "as the model relies on it's own chunking mechanism (cf. Whisper original paper, section 3.8. " "Long-form Transcription)." 
) logger.warning(type_warning) preprocess_params["chunk_length_s"] = chunk_length_s if stride_length_s is not None: preprocess_params["stride_length_s"] = stride_length_s forward_params = defaultdict(dict) if generate_kwargs is not None: forward_params.update(generate_kwargs) postprocess_params = {} if decoder_kwargs is not None: postprocess_params["decoder_kwargs"] = decoder_kwargs # in some models like whisper, the generation config has a `return_timestamps` key if hasattr(self, "generation_config") and hasattr(self.generation_config, "return_timestamps"): return_timestamps = return_timestamps or self.generation_config.return_timestamps if return_timestamps is not None: # Check whether we have a valid setting for return_timestamps and throw an error before we perform a forward pass if self.type == "seq2seq" and return_timestamps: raise ValueError("We cannot return_timestamps yet on non-CTC models apart from Whisper!") if self.type == "ctc_with_lm" and return_timestamps != "word": raise ValueError("CTC with LM can only predict word level timestamps, set `return_timestamps='word'`") if self.type == "ctc" and return_timestamps not in ["char", "word"]: raise ValueError( "CTC can either predict character level timestamps, or word level timestamps. " "Set `return_timestamps='char'` or `return_timestamps='word'` as required." ) if self.type == "seq2seq_whisper" and return_timestamps == "char": raise ValueError( "Whisper cannot return `char` timestamps, only word level or segment level timestamps. " "Use `return_timestamps='word'` or `return_timestamps=True` respectively." ) forward_params["return_timestamps"] = return_timestamps postprocess_params["return_timestamps"] = return_timestamps if return_language is not None: if self.type != "seq2seq_whisper": raise ValueError("Only Whisper can return language for now.") postprocess_params["return_language"] = return_language if getattr(self, "assistant_model", None) is not None: forward_params["assistant_model"] = self.assistant_model if getattr(self, "assistant_tokenizer", None) is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, postprocess_params def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): if isinstance(inputs, str): if inputs.startswith("http://") or inputs.startswith("https://"): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png inputs = requests.get(inputs).content else: with open(inputs, "rb") as f: inputs = f.read() if isinstance(inputs, bytes): inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) stride = None extra = {} if is_torch_available(): import torch if isinstance(inputs, torch.Tensor): inputs = inputs.cpu().numpy() if is_torchcodec_available(): import torchcodec if isinstance(inputs, torchcodec.decoders.AudioDecoder): _audio_samples = inputs.get_all_samples() # torchcodec always returns (num_channels, num_samples) # while before (datasets < 4.0) we had (2, num_samples) if stereo, (num_samples,) if mono _array = _audio_samples.data _array = _array[0] if _array.ndim == 2 and _array.shape[0] == 1 else _array inputs = {"array": _array, "sampling_rate": _audio_samples.sample_rate} if isinstance(inputs, dict): stride = inputs.pop("stride", None) # Accepting `"array"` which is the key defined in `datasets` for # better integration if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)): raise 
ValueError( "When passing a dictionary to AutomaticSpeechRecognitionPipeline, the dict needs to contain a " '"raw" key containing the numpy array or torch tensor representing the audio and a "sampling_rate" key, ' "containing the sampling_rate associated with that array" ) _inputs = inputs.pop("raw", None) if _inputs is None: # Remove path which will not be used from `datasets`. inputs.pop("path", None) _inputs = inputs.pop("array", None) in_sampling_rate = inputs.pop("sampling_rate") extra = inputs inputs = _inputs if in_sampling_rate != self.feature_extractor.sampling_rate: if is_torchaudio_available(): from torchaudio import functional as F else: raise ImportError( "torchaudio is required to resample audio samples in AutomaticSpeechRecognitionPipeline. " "The torchaudio package can be installed through: `pip install torchaudio`." ) inputs = F.resample( torch.from_numpy(inputs) if isinstance(inputs, np.ndarray) else inputs, in_sampling_rate, self.feature_extractor.sampling_rate, ).numpy() ratio = self.feature_extractor.sampling_rate / in_sampling_rate else: ratio = 1 if stride is not None: if stride[0] + stride[1] > inputs.shape[0]: raise ValueError("Stride is too large for input") # Stride needs to get the chunk length here, it's going to get # swallowed by the `feature_extractor` later, and then batching # can add extra data in the inputs, so we need to keep track # of the original length in the stride so we can cut properly. stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) if not isinstance(inputs, (np.ndarray, torch.Tensor)): raise TypeError(f"We expect a numpy ndarray or torch tensor as input, got `{type(inputs)}`") if inputs.ndim != 1: logger.warning( f"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline, got {inputs.ndim}. Taking the mean of the channels for mono conversion." ) inputs = inputs.mean(axis=0) if chunk_length_s: if stride_length_s is None: stride_length_s = chunk_length_s / 6 if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] # XXX: Carefully, this variable will not exist in `seq2seq` setting. # Currently chunking is not possible at this level for `seq2seq` so # it's ok. 
align_to = getattr(self.model.config, "inputs_to_logits_ratio", 1) chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to) stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to) stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to) if chunk_len < stride_left + stride_right: raise ValueError("Chunk length must be superior to stride length") for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.dtype): yield {**item, **extra} else: if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples: processed = self.feature_extractor( inputs, sampling_rate=self.feature_extractor.sampling_rate, truncation=False, padding="longest", return_tensors="pt", return_attention_mask=True, ) else: if self.type == "seq2seq_whisper" and stride is None: processed = self.feature_extractor( inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt", return_token_timestamps=True, return_attention_mask=True, ) extra["num_frames"] = processed.pop("num_frames") else: processed = self.feature_extractor( inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt", return_attention_mask=True, ) if self.dtype is not None: processed = processed.to(dtype=self.dtype) if stride is not None: if self.type == "seq2seq": raise ValueError("Stride is only usable with CTC models, try removing it !") processed["stride"] = stride yield {"is_last": True, **processed, **extra} def _forward(self, model_inputs, return_timestamps=False, **generate_kwargs): attention_mask = model_inputs.pop("attention_mask", None) stride = model_inputs.pop("stride", None) num_frames = model_inputs.pop("num_frames", None) is_last = model_inputs.pop("is_last") if stride is not None and num_frames is not None: raise ValueError("num_frames must be used only when stride is None") if self.type in {"seq2seq", "seq2seq_whisper"}: # Consume values so we can let extra information flow freely through # the pipeline (important for `partial` in microphone) if "input_features" in model_inputs: inputs = model_inputs.pop("input_features") elif "input_values" in model_inputs: inputs = model_inputs.pop("input_values") else: raise ValueError( "Seq2Seq speech recognition model requires either a " f"`input_features` or `input_values` key, but only has {model_inputs.keys()}" ) # custom processing for Whisper timestamps and word-level timestamps return_timestamps = return_timestamps or getattr(self.generation_config, "return_timestamps", False) if return_timestamps and self.type == "seq2seq_whisper": generate_kwargs["return_timestamps"] = bool(return_timestamps) if return_timestamps == "word": generate_kwargs["return_token_timestamps"] = True generate_kwargs["return_segments"] = True # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config main_input_name = self.model.main_input_name if hasattr(self.model, "main_input_name") else "inputs" generate_kwargs = { main_input_name: inputs, "attention_mask": attention_mask, **generate_kwargs, } tokens = self.model.generate(**generate_kwargs) # whisper longform generation stores timestamps in "segments" if return_timestamps == "word" and self.type == "seq2seq_whisper": if "segments" not in tokens: out = {"tokens": tokens["sequences"], "token_timestamps": 
tokens["token_timestamps"]} else: token_timestamps = [ torch.cat([segment["token_timestamps"] for segment in segment_list]) for segment_list in tokens["segments"] ] out = {"tokens": tokens["sequences"], "token_timestamps": token_timestamps} else: out = {"tokens": tokens} if self.type == "seq2seq_whisper": if stride is not None: out["stride"] = stride else: inputs = { self.model.main_input_name: model_inputs.pop(self.model.main_input_name), "attention_mask": attention_mask, } outputs = self.model(**inputs) logits = outputs.logits if self.type == "ctc_with_lm": out = {"logits": logits} else: out = {"tokens": logits.argmax(dim=-1)} if stride is not None: # Send stride to `postprocess`. # it needs to be handled there where # the pieces are to be concatenated. ratio = 1 / self.model.config.inputs_to_logits_ratio if isinstance(stride, tuple): out["stride"] = rescale_stride([stride], ratio)[0] else: out["stride"] = rescale_stride(stride, ratio) # Leftover extra = model_inputs return {"is_last": is_last, **out, **extra} def postprocess( self, model_outputs, decoder_kwargs: Optional[dict] = None, return_timestamps=None, return_language=None ): # Optional return types optional = {} final_items = [] key = "logits" if self.type == "ctc_with_lm" else "tokens" stride = None for outputs in model_outputs: if self.framework == "pt" and outputs[key].dtype in (torch.bfloat16, torch.float16): items = outputs[key].to(torch.float32).numpy() else: items = outputs[key].numpy() stride = outputs.get("stride", None) if stride is not None and self.type in {"ctc", "ctc_with_lm"}: total_n, left, right = stride # Total_n might be < logits.shape[1] # because of padding, that's why # we need to reconstruct this information # This won't work with left padding (which doesn't exist right now) right_n = total_n - right items = items[:, left:right_n] final_items.append(items) if stride and self.type == "seq2seq": items = _find_longest_common_sequence(final_items, self.tokenizer) elif self.type == "seq2seq_whisper": time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions # Send the chunking back to seconds, it's easier to handle in whisper sampling_rate = self.feature_extractor.sampling_rate for output in model_outputs: if "stride" in output: chunk_len, stride_left, stride_right = output["stride"] # Go back in seconds chunk_len /= sampling_rate stride_left /= sampling_rate stride_right /= sampling_rate output["stride"] = chunk_len, stride_left, stride_right text, optional = self.tokenizer._decode_asr( model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision, ) else: items = np.concatenate(final_items, axis=1) items = items.squeeze(0) if self.type == "ctc_with_lm": if decoder_kwargs is None: decoder_kwargs = {} beams = self.decoder.decode_beams(items, **decoder_kwargs) text = beams[0][0] if return_timestamps: # Simply cast from pyctcdecode format to wav2vec2 format to leverage # pre-existing code later chunk_offset = beams[0][2] offsets = [] for word, (start_offset, end_offset) in chunk_offset: offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) elif self.type != "seq2seq_whisper": skip_special_tokens = self.type != "ctc" text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens) if return_timestamps: offsets = self.tokenizer.decode( items, skip_special_tokens=skip_special_tokens, output_char_offsets=True )["char_offsets"] if return_timestamps == "word": offsets = 
self.tokenizer._get_word_offsets(offsets, self.tokenizer.replace_word_delimiter_char) if return_timestamps and self.type not in {"seq2seq", "seq2seq_whisper"}: chunks = [] for item in offsets: start = item["start_offset"] * self.model.config.inputs_to_logits_ratio start /= self.feature_extractor.sampling_rate stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio stop /= self.feature_extractor.sampling_rate chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)}) optional["chunks"] = chunks extra = defaultdict(list) for output in model_outputs: output.pop("tokens", None) output.pop("logits", None) output.pop("is_last", None) output.pop("stride", None) output.pop("token_timestamps", None) for k, v in output.items(): extra[k].append(v) return {"text": text, **optional, **extra}
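The sketch below is an illustrative (not canonical) use of the chunking parameters handled in `preprocess` and `_sanitize_parameters` above. The checkpoint name, audio length, and chunk/stride durations are assumptions; any CTC checkpoint with a compatible feature extractor should behave the same way.

```python
import numpy as np
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="facebook/wav2vec2-base-960h",  # a CTC model, so stride and char/word timestamps apply
)

# Raw audio passed as a dict lets the pipeline resample to the feature extractor's rate;
# here the array is already at 16 kHz so no torchaudio resampling is triggered.
audio = {"raw": np.zeros(16_000 * 20, dtype=np.float32), "sampling_rate": 16_000}

out = asr(
    audio,
    chunk_length_s=10,         # enables chunk_iter() above
    stride_length_s=(2, 2),    # left/right context, trimmed again in postprocess()
    return_timestamps="word",  # only "char"/"word" are valid for CTC, per _sanitize_parameters
)
print(out["text"], out.get("chunks", [])[:3])
```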
transformers/src/transformers/pipelines/automatic_speech_recognition.py/0
{ "file_path": "transformers/src/transformers/pipelines/automatic_speech_recognition.py", "repo_id": "transformers", "token_count": 15076 }
571
import collections import types import numpy as np from ..generation import GenerationConfig from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, requires_backends, ) from .base import ArgumentHandler, Dataset, Pipeline, PipelineException, build_pipeline_init_args if is_torch_available(): import torch from ..models.auto.modeling_auto import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, ) if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import ( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, ) class TableQuestionAnsweringArgumentHandler(ArgumentHandler): """ Handles arguments for the TableQuestionAnsweringPipeline """ def __call__(self, table=None, query=None, **kwargs): # Returns tqa_pipeline_inputs of shape: # [ # {"table": pd.DataFrame, "query": list[str]}, # ..., # {"table": pd.DataFrame, "query" : list[str]} # ] requires_backends(self, "pandas") import pandas as pd if table is None: raise ValueError("Keyword argument `table` cannot be None.") elif query is None: if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None: tqa_pipeline_inputs = [table] elif isinstance(table, list) and len(table) > 0: if not all(isinstance(d, dict) for d in table): raise ValueError( f"Keyword argument `table` should be a list of dict, but is {(type(d) for d in table)}" ) if table[0].get("query") is not None and table[0].get("table") is not None: tqa_pipeline_inputs = table else: raise ValueError( "If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`" f" and `query` key, but only dictionary has keys {table[0].keys()} `table` and `query` keys." ) elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType): return table else: raise ValueError( "Invalid input. Keyword argument `table` should be either of type `dict` or `list`, but " f"is {type(table)})" ) else: tqa_pipeline_inputs = [{"table": table, "query": query}] for tqa_pipeline_input in tqa_pipeline_inputs: if not isinstance(tqa_pipeline_input["table"], pd.DataFrame): if tqa_pipeline_input["table"] is None: raise ValueError("Table cannot be None.") tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"]) return tqa_pipeline_inputs @add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) class TableQuestionAnsweringPipeline(Pipeline): """ Table Question Answering pipeline using a `ModelForTableQuestionAnswering`. This pipeline is only available in PyTorch. Unless the model you're using explicitly sets these generation parameters in its configuration files (`generation_config.json`), the following default values will be used: - max_new_tokens: 256 Example: ```python >>> from transformers import pipeline >>> oracle = pipeline(model="google/tapas-base-finetuned-wtq") >>> table = { ... "Repository": ["Transformers", "Datasets", "Tokenizers"], ... "Stars": ["36542", "4512", "3934"], ... "Contributors": ["651", "77", "34"], ... "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], ... 
} >>> oracle(query="How many stars does the transformers repository have?", table=table) {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'} ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This tabular question answering pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"table-question-answering"`. The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=table-question-answering). """ default_input_names = "table,query" _pipeline_calls_generate = True _load_processor = False _load_image_processor = False _load_feature_extractor = False _load_tokenizer = True # Make sure the docstring is updated when the default generation config is changed _default_generation_config = GenerationConfig( max_new_tokens=256, ) def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self._args_parser = args_parser if self.framework == "tf": mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() mapping.update(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) else: mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() mapping.update(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) self.check_model_type(mapping) self.aggregate = getattr(self.model.config, "aggregation_labels", None) and getattr( self.model.config, "num_aggregation_labels", None ) self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None def batch_inference(self, **inputs): return self.model(**inputs) def sequential_inference(self, **inputs): """ Inference used for models that need to process sequences in a sequential fashion, like the SQA models which handle conversational query related to a table. """ if self.framework == "pt": all_logits = [] all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"].to(self.device) attention_mask = inputs["attention_mask"].to(self.device) token_type_ids = inputs["token_type_ids"].to(self.device) token_type_ids_example = None for index in range(batch_size): # If sequences have already been processed, the token type IDs will be created according to the previous # answer. 
if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device) input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) outputs = self.model( input_ids=input_ids_example.unsqueeze(0), attention_mask=attention_mask_example.unsqueeze(0), token_type_ids=token_type_ids_example.unsqueeze(0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) dist_per_token = torch.distributions.Bernoulli(logits=logits) probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( dist_per_token.probs.device ) coords_to_probs = collections.defaultdict(list) for i, p in enumerate(probabilities.squeeze().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = torch.cat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0)) else: all_logits = [] all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] token_type_ids = inputs["token_type_ids"].numpy() token_type_ids_example = None for index in range(batch_size): # If sequences have already been processed, the token type IDs will be created according to the previous # answer. 
if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] # shape (seq_len,) model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = model_labels input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] # shape (seq_len,) token_type_ids_example = token_type_ids[index] # shape (seq_len, 7) outputs = self.model( input_ids=np.expand_dims(input_ids_example, axis=0), attention_mask=np.expand_dims(attention_mask_example, axis=0), token_type_ids=np.expand_dims(token_type_ids_example, axis=0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) probabilities = tf.math.sigmoid(tf.cast(logits, tf.float32)) * tf.cast( attention_mask_example, tf.float32 ) coords_to_probs = collections.defaultdict(list) for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = tf.concat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0)) def __call__(self, *args, **kwargs): r""" Answers queries according to a table. The pipeline accepts several types of inputs which are detailed below: - `pipeline(table, query)` - `pipeline(table, [query])` - `pipeline(table=table, query=query)` - `pipeline(table=table, query=[query])` - `pipeline({"table": table, "query": query})` - `pipeline({"table": table, "query": [query]})` - `pipeline([{"table": table, "query": query}, {"table": table, "query": query}])` The `table` argument should be a dict or a DataFrame built from that dict, containing the whole table: Example: ```python data = { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } ``` This dictionary can be passed in as such, or can be converted to a pandas DataFrame: Example: ```python import pandas as pd table = pd.DataFrame.from_dict(data) ``` Args: table (`pd.DataFrame` or `Dict`): Pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values. See above for an example of dictionary. query (`str` or `list[str]`): Query or list of queries that will be sent to the model alongside the table. sequential (`bool`, *optional*, defaults to `False`): Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. 
Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate row by row, removing rows from the table. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). Return: A dictionary or a list of dictionaries containing results: Each result is a dictionary with the following keys: - **answer** (`str`) -- The answer of the query given the table. If there is an aggregator, the answer will be preceded by `AGGREGATOR >`. - **coordinates** (`list[tuple[int, int]]`) -- Coordinates of the cells of the answers. - **cells** (`list[str]`) -- List of strings made up of the answer cell values. - **aggregator** (`str`) -- If the model has an aggregator, this returns the aggregator. """ pipeline_inputs = self._args_parser(*args, **kwargs) results = super().__call__(pipeline_inputs, **kwargs) if len(results) == 1: return results[0] return results def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs): preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation forward_params = {} if sequential is not None: forward_params["sequential"] = sequential if getattr(self, "assistant_model", None) is not None: forward_params["assistant_model"] = self.assistant_model if getattr(self, "assistant_tokenizer", None) is not None: forward_params["tokenizer"] = self.tokenizer forward_params["assistant_tokenizer"] = self.assistant_tokenizer return preprocess_params, forward_params, {} def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None): if truncation is None: if self.type == "tapas": truncation = "drop_rows_to_fit" else: truncation = "do_not_truncate" table, query = pipeline_input["table"], pipeline_input["query"] if table.empty: raise ValueError("table is empty") if query is None or query == "": raise ValueError("query is empty") inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding) inputs["table"] = table return inputs def _forward(self, model_inputs, sequential=False, **generate_kwargs): table = model_inputs.pop("table") if self.type == "tapas": if sequential: outputs = self.sequential_inference(**model_inputs) else: outputs = self.batch_inference(**model_inputs) else: # User-defined `generation_config` passed to the pipeline call take precedence if "generation_config" not in generate_kwargs: generate_kwargs["generation_config"] = self.generation_config outputs = self.model.generate(**model_inputs, **generate_kwargs) model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs} return model_outputs def postprocess(self, model_outputs): inputs = 
model_outputs["model_inputs"] table = model_outputs["table"] outputs = model_outputs["outputs"] if self.type == "tapas": if self.aggregate: logits, logits_agg = outputs[:2] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg) answer_coordinates_batch, agg_predictions = predictions aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)} no_agg_label_index = self.model.config.no_aggregation_label_index aggregators_prefix = { i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index } else: logits = outputs[0] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits) answer_coordinates_batch = predictions[0] aggregators = {} aggregators_prefix = {} answers = [] for index, coordinates in enumerate(answer_coordinates_batch): cells = [table.iat[coordinate] for coordinate in coordinates] aggregator = aggregators.get(index, "") aggregator_prefix = aggregators_prefix.get(index, "") answer = { "answer": aggregator_prefix + ", ".join(cells), "coordinates": coordinates, "cells": [table.iat[coordinate] for coordinate in coordinates], } if aggregator: answer["aggregator"] = aggregator answers.append(answer) if len(answer) == 0: raise PipelineException("Table question answering", self.model.name_or_path, "Empty answer") else: answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] return answers if len(answers) > 1 else answers[0]
transformers/src/transformers/pipelines/table_question_answering.py/0
{ "file_path": "transformers/src/transformers/pipelines/table_question_answering.py", "repo_id": "transformers", "token_count": 9553 }
572
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # Modifications Copyright (C) 2025, Advanced Micro Devices, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Optional, Union from ..models.auto.configuration_auto import AutoConfig from ..utils import logging from ..utils.quantization_config import ( AqlmConfig, AutoRoundConfig, AwqConfig, BitNetQuantConfig, BitsAndBytesConfig, CompressedTensorsConfig, EetqConfig, FbgemmFp8Config, FineGrainedFP8Config, FPQuantConfig, GPTQConfig, HiggsConfig, HqqConfig, Mxfp4Config, QuantizationConfigMixin, QuantizationMethod, QuantoConfig, QuarkConfig, SpQRConfig, TorchAoConfig, VptqConfig, ) from .base import HfQuantizer from .quantizer_aqlm import AqlmHfQuantizer from .quantizer_auto_round import AutoRoundQuantizer from .quantizer_awq import AwqQuantizer from .quantizer_bitnet import BitNetHfQuantizer from .quantizer_bnb_4bit import Bnb4BitHfQuantizer from .quantizer_bnb_8bit import Bnb8BitHfQuantizer from .quantizer_compressed_tensors import CompressedTensorsHfQuantizer from .quantizer_eetq import EetqHfQuantizer from .quantizer_fbgemm_fp8 import FbgemmFp8HfQuantizer from .quantizer_finegrained_fp8 import FineGrainedFP8HfQuantizer from .quantizer_fp_quant import FPQuantHfQuantizer from .quantizer_gptq import GptqHfQuantizer from .quantizer_higgs import HiggsHfQuantizer from .quantizer_hqq import HqqHfQuantizer from .quantizer_mxfp4 import Mxfp4HfQuantizer from .quantizer_quanto import QuantoHfQuantizer from .quantizer_quark import QuarkHfQuantizer from .quantizer_spqr import SpQRHfQuantizer from .quantizer_torchao import TorchAoHfQuantizer from .quantizer_vptq import VptqHfQuantizer AUTO_QUANTIZER_MAPPING = { "awq": AwqQuantizer, "bitsandbytes_4bit": Bnb4BitHfQuantizer, "bitsandbytes_8bit": Bnb8BitHfQuantizer, "gptq": GptqHfQuantizer, "aqlm": AqlmHfQuantizer, "quanto": QuantoHfQuantizer, "quark": QuarkHfQuantizer, "fp_quant": FPQuantHfQuantizer, "eetq": EetqHfQuantizer, "higgs": HiggsHfQuantizer, "hqq": HqqHfQuantizer, "compressed-tensors": CompressedTensorsHfQuantizer, "fbgemm_fp8": FbgemmFp8HfQuantizer, "torchao": TorchAoHfQuantizer, "bitnet": BitNetHfQuantizer, "vptq": VptqHfQuantizer, "spqr": SpQRHfQuantizer, "fp8": FineGrainedFP8HfQuantizer, "auto-round": AutoRoundQuantizer, "mxfp4": Mxfp4HfQuantizer, } AUTO_QUANTIZATION_CONFIG_MAPPING = { "awq": AwqConfig, "bitsandbytes_4bit": BitsAndBytesConfig, "bitsandbytes_8bit": BitsAndBytesConfig, "eetq": EetqConfig, "gptq": GPTQConfig, "aqlm": AqlmConfig, "quanto": QuantoConfig, "quark": QuarkConfig, "fp_quant": FPQuantConfig, "hqq": HqqConfig, "compressed-tensors": CompressedTensorsConfig, "fbgemm_fp8": FbgemmFp8Config, "higgs": HiggsConfig, "torchao": TorchAoConfig, "bitnet": BitNetQuantConfig, "vptq": VptqConfig, "spqr": SpQRConfig, "fp8": FineGrainedFP8Config, "auto-round": AutoRoundConfig, "mxfp4": Mxfp4Config, } logger = logging.get_logger(__name__) class AutoQuantizationConfig: """ The Auto-HF quantization config class that takes care of 
automatically dispatching to the correct quantization config given a quantization config stored in a dictionary. """ @classmethod def from_dict(cls, quantization_config_dict: dict): quant_method = quantization_config_dict.get("quant_method") # We need a special care for bnb models to make sure everything is BC .. if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False): suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit" quant_method = QuantizationMethod.BITS_AND_BYTES + suffix elif quant_method is None: raise ValueError( "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized" ) if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING: raise ValueError( f"Unknown quantization type, got {quant_method} - supported types are:" f" {list(AUTO_QUANTIZER_MAPPING.keys())}" ) target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method] return target_cls.from_dict(quantization_config_dict) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) if getattr(model_config, "quantization_config", None) is None: raise ValueError( f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized." ) quantization_config_dict = model_config.quantization_config quantization_config = cls.from_dict(quantization_config_dict) # Update with potential kwargs that are passed through from_pretrained. quantization_config.update(**kwargs) return quantization_config class AutoHfQuantizer: """ The Auto-HF quantizer class that takes care of automatically instantiating to the correct `HfQuantizer` given the `QuantizationConfig`. """ @classmethod def from_config(cls, quantization_config: Union[QuantizationConfigMixin, dict], **kwargs): # Convert it to a QuantizationConfig if the q_config is a dict if isinstance(quantization_config, dict): quantization_config = AutoQuantizationConfig.from_dict(quantization_config) quant_method = quantization_config.quant_method # Again, we need a special care for bnb as we have a single quantization config # class for both 4-bit and 8-bit quantization if quant_method == QuantizationMethod.BITS_AND_BYTES: if quantization_config.load_in_8bit: quant_method += "_8bit" else: quant_method += "_4bit" if quant_method not in AUTO_QUANTIZER_MAPPING: raise ValueError( f"Unknown quantization type, got {quant_method} - supported types are:" f" {list(AUTO_QUANTIZER_MAPPING.keys())}" ) target_cls = AUTO_QUANTIZER_MAPPING[quant_method] return target_cls(quantization_config, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): quantization_config = AutoQuantizationConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls.from_config(quantization_config) @classmethod def merge_quantization_configs( cls, quantization_config: Union[dict, QuantizationConfigMixin], quantization_config_from_args: Optional[QuantizationConfigMixin], ): """ handles situations where both quantization_config from args and quantization_config from model config are present. """ if quantization_config_from_args is not None: warning_msg = ( "You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading" " already has a `quantization_config` attribute. The `quantization_config` from the model will be used." 
) else: warning_msg = "" if isinstance(quantization_config, dict): # Convert the config based on the type of quantization_config_from_args (e.g., AutoRoundConfig), which takes priority before automatic configuration dispatch. if isinstance(quantization_config_from_args, AutoRoundConfig): quantization_config = AutoRoundConfig.from_dict(quantization_config) else: quantization_config = AutoQuantizationConfig.from_dict(quantization_config) if ( quantization_config_from_args is not None and quantization_config.__class__.__name__ != quantization_config_from_args.__class__.__name__ ): raise ValueError( f"The model is quantized with {quantization_config.__class__.__name__} but you are passing a {quantization_config_from_args.__class__.__name__} config. " "Please make sure to pass the same quantization config class to `from_pretrained` with different loading attributes." ) if ( isinstance( quantization_config, (GPTQConfig, AwqConfig, AutoRoundConfig, FbgemmFp8Config, CompressedTensorsConfig, Mxfp4Config), ) and quantization_config_from_args is not None ): # special case for GPTQ / AWQ / FbgemmFp8 config collision loading_attr_dict = quantization_config_from_args.get_loading_attributes() for attr, val in loading_attr_dict.items(): setattr(quantization_config, attr, val) warning_msg += f"However, loading attributes (e.g. {list(loading_attr_dict.keys())}) will be overwritten with the one you passed to `from_pretrained`. The rest will be ignored." if warning_msg != "" and not isinstance(quantization_config, Mxfp4Config): warnings.warn(warning_msg) else: # in the case of mxfp4, we don't want to print the warning message, bit confusing for users logger.info(warning_msg) return quantization_config @staticmethod def supports_quant_method(quantization_config_dict): quant_method = quantization_config_dict.get("quant_method", None) if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False): suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit" quant_method = QuantizationMethod.BITS_AND_BYTES + suffix elif quant_method is None: raise ValueError( "The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized" ) if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING: logger.warning( f"Unknown quantization type, got {quant_method} - supported types are:" f" {list(AUTO_QUANTIZER_MAPPING.keys())}. Hence, we will skip the quantization. " "To remove the warning, you can delete the quantization_config attribute in config.json" ) return False return True def register_quantization_config(method: str): """Register a custom quantization configuration.""" def register_config_fn(cls): if method in AUTO_QUANTIZATION_CONFIG_MAPPING: raise ValueError(f"Config '{method}' already registered") if not issubclass(cls, QuantizationConfigMixin): raise TypeError("Config must extend QuantizationConfigMixin") AUTO_QUANTIZATION_CONFIG_MAPPING[method] = cls return cls return register_config_fn def register_quantizer(name: str): """Register a custom quantizer.""" def register_quantizer_fn(cls): if name in AUTO_QUANTIZER_MAPPING: raise ValueError(f"Quantizer '{name}' already registered") if not issubclass(cls, HfQuantizer): raise ValueError("Quantizer must extend HfQuantizer") AUTO_QUANTIZER_MAPPING[name] = cls return cls return register_quantizer_fn
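A hedged sketch of the two registration helpers defined at the end of this module. The `"my-4bit"` method name and the class bodies are hypothetical; a real quantizer would implement the `HfQuantizer` hooks for an actual backend, and the exact set of abstract methods may differ slightly between library versions.

```python
from transformers.quantizers.auto import register_quantization_config, register_quantizer
from transformers.quantizers.base import HfQuantizer
from transformers.utils.quantization_config import QuantizationConfigMixin


@register_quantization_config("my-4bit")
class My4BitConfig(QuantizationConfigMixin):
    def __init__(self, group_size: int = 128, **kwargs):
        self.quant_method = "my-4bit"
        self.group_size = group_size


@register_quantizer("my-4bit")
class My4BitQuantizer(HfQuantizer):
    requires_calibration = False

    def _process_model_before_weight_loading(self, model, **kwargs):
        # A real backend would swap nn.Linear modules for quantized ones here.
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    @property
    def is_trainable(self):
        return False

    def is_serializable(self, safe_serialization=None):
        return True
```

Once registered, a checkpoint whose `config.json` carries `"quant_method": "my-4bit"` would be dispatched to these classes through `AutoQuantizationConfig.from_dict` and `AutoHfQuantizer.from_config` shown above.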
transformers/src/transformers/quantizers/auto.py/0
{ "file_path": "transformers/src/transformers/quantizers/auto.py", "repo_id": "transformers", "token_count": 4861 }
573
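The registration helpers at the end of `auto.py` (`register_quantization_config` and `register_quantizer`) are how a custom quantization method gets wired into the `AutoQuantizationConfig`/`AutoHfQuantizer` dispatch tables. The sketch below shows one way this could be used; the method name `custom_int8`, the config fields, and the stubbed quantizer hooks are illustrative assumptions rather than part of the library, and only classes defined in or imported by this module are relied upon.

```python
# Minimal sketch (assumed usage, not library code): register a hypothetical
# "custom_int8" method so AutoHfQuantizer.from_config can dispatch to it.
from transformers.quantizers.auto import (
    AutoHfQuantizer,
    register_quantization_config,
    register_quantizer,
)
from transformers.quantizers.base import HfQuantizer
from transformers.utils.quantization_config import QuantizationConfigMixin


@register_quantization_config("custom_int8")
class CustomInt8Config(QuantizationConfigMixin):
    def __init__(self, group_size: int = 128, **kwargs):
        self.quant_method = "custom_int8"  # must match the registered key
        self.group_size = group_size


@register_quantizer("custom_int8")
class CustomInt8Quantizer(HfQuantizer):
    requires_calibration = False

    def _process_model_before_weight_loading(self, model, **kwargs):
        # A real quantizer would swap nn.Linear modules for quantized ones here.
        return model

    def _process_model_after_weight_loading(self, model, **kwargs):
        return model

    def is_serializable(self, safe_serialization=None):
        return True

    @property
    def is_trainable(self):
        return False


# from_config looks up quant_method ("custom_int8") in AUTO_QUANTIZER_MAPPING
# and instantiates the registered quantizer class with the given config.
quantizer = AutoHfQuantizer.from_config(CustomInt8Config(group_size=64))
print(type(quantizer).__name__)  # CustomInt8Quantizer
```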
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any, Optional from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import ( is_accelerate_available, is_kernels_available, is_torch_available, is_triton_available, logging, ) from .quantizers_utils import get_module_from_name if is_torch_available(): import torch logger = logging.get_logger(__name__) class Mxfp4HfQuantizer(HfQuantizer): """ FP4 quantization using fbgemm kernels """ requires_parameters_quantization = True # to remove if we decide to allow quantizing weights with this method requires_calibration = False required_packages = ["accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_torch_available(): raise ImportError( "Using mxfp4 quantization requires torch" "Please install the latest version of torch ( pip install --upgrade torch )" ) if self.quantization_config.dequantize: return if not torch.cuda.is_available(): if self.pre_quantized: logger.warning_once( "Using MXFP4 quantized models requires a GPU, we will default to dequantizing the model to bf16" ) self.quantization_config.dequantize = True return else: raise RuntimeError("Quantizing a model using MXFP4 requires a GPU") if not is_accelerate_available(): raise ImportError("Using mxfp4 requires Accelerate: `pip install accelerate`") compute_capability = torch.cuda.get_device_capability() gpu_is_supported = compute_capability >= (7, 5) kernels_available = is_triton_available("3.4.0") and is_kernels_available() if self.pre_quantized: # On unsupported GPUs or without kernels, we will dequantize the model to bf16 if not gpu_is_supported: logger.warning_once( "MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200). " "We will default to dequantizing the model to bf16." 
) self.quantization_config.dequantize = True return if not kernels_available: logger.warning_once( "MXFP4 quantization requires triton >= 3.4.0 and kernels installed, we will default to dequantizing the model to bf16" ) self.quantization_config.dequantize = True return elif not gpu_is_supported: # we can't quantize the model in this case so we raise an error raise ValueError( "MXFP4 quantization is only supported on GPUs with compute capability >= 7.5 (e.g T4, A100, L4, H100, or B200)" ) elif not kernels_available: # we can't quantize the model in this case so we raise an error raise ValueError("MXFP4 quantization requires triton >= 3.4.0 and triton_kernels installed") if not self.pre_quantized: from kernels import get_kernel global triton_kernels_hub triton_kernels_hub = get_kernel("kernels-community/triton_kernels") device_map = kwargs.get("device_map") if device_map is None: logger.warning_once( "You have loaded an FP4 model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. " ) elif device_map is not None: if ( not self.pre_quantized and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): raise ValueError( "You are attempting to load an FP4 model with a device_map that contains a CPU or disk device." "This is not supported when the model is quantized on the fly. " "Please use a quantized checkpoint or remove the CPU or disk device from the device_map." ) def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: dtype = torch.bfloat16 logger.info( "Overriding dtype=%s with `dtype=torch.bfloat16` due to " "requirements of `fbgemm-gpu` to enable model loading in fp4. 
" "Pass your own dtype to specify the dtype of the remaining non-linear layers or pass" " dtype=torch.bfloat16 to remove this warning.", dtype, ) return dtype def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ): from ..integrations import Mxfp4GptOssExperts from ..models.gpt_oss.modeling_gpt_oss import GptOssExperts # if we are dequantizing, the model doesn't have scales, and blocks only params like gate_up_proj and down_proj so we need to handle this case differently if self.quantization_config.dequantize and ("blocks" in param_name or "scales" in param_name): module, tensor_name = get_module_from_name(model, param_name[: -len("_blocks")]) else: module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, Mxfp4GptOssExperts) or ( isinstance(module, GptOssExperts) and self.quantization_config.dequantize ): if tensor_name in ["down_proj_bias", "gate_up_proj_bias"]: return False return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: dict[str, Any], unexpected_keys: Optional[list[str]] = None, **kwargs, ): from ..integrations import Mxfp4GptOssExperts, dequantize, load_and_swizzle_mxfp4, quantize_to_mxfp4 from ..models.gpt_oss.modeling_gpt_oss import GptOssExperts if not self.pre_quantized: PrecisionConfig, FlexCtx, InFlexData = ( triton_kernels_hub.matmul_ogs.PrecisionConfig, triton_kernels_hub.matmul_ogs.FlexCtx, triton_kernels_hub.matmul_ogs.InFlexData, ) module, _ = get_module_from_name(model, param_name) with torch.cuda.device(target_device): if isinstance(module, Mxfp4GptOssExperts): if "gate_up_proj" in param_name: right_pad = module.gate_up_proj_right_pad bottom_pad = module.gate_up_proj_bottom_pad loaded_weight = torch.nn.functional.pad( param_value, (0, right_pad, 0, bottom_pad, 0, 0), mode="constant", value=0 ) triton_weight_tensor, weight_scale = quantize_to_mxfp4(loaded_weight) module.gate_up_proj_precision_config = PrecisionConfig( weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) module.gate_up_proj = triton_weight_tensor module.gate_up_proj_blocks = torch.nn.Parameter( triton_weight_tensor.storage.data, requires_grad=False ) elif "down_proj" in param_name: right_pad = module.down_proj_right_pad bottom_pad = module.down_proj_bottom_pad loaded_weight = torch.nn.functional.pad( param_value, (0, right_pad, 0, bottom_pad, 0, 0), mode="constant", value=0 ).to(target_device) triton_weight_tensor, weight_scale = quantize_to_mxfp4(loaded_weight) module.down_proj_precision_config = PrecisionConfig( weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData()) ) module.down_proj = triton_weight_tensor module.down_proj_blocks = torch.nn.Parameter( triton_weight_tensor.storage.data, requires_grad=False ) # we take this path if already quantized but not in a compatible way # The params going here are either gate_up_proj_blocks, or down_proj_blocks, or gate_up_proj_scales, or down_proj_scales else: empty_param = kwargs.get("empty_param") casting_dtype = kwargs.get("casting_dtype") to_contiguous = kwargs.get("to_contiguous") rank = kwargs.get("rank") device_mesh = kwargs.get("device_mesh") if ("blocks" in param_name or "scales" in param_name) and self.quantization_config.dequantize: # blocks and scales have the same length that's this works for both module, _ = get_module_from_name(model, param_name[: -len("_blocks")]) else: module, _ = 
get_module_from_name(model, param_name) shard_kwargs = { "empty_param": empty_param, "casting_dtype": casting_dtype, "to_contiguous": to_contiguous, "rank": rank, "device_mesh": device_mesh, "model": model, } if isinstance(module, Mxfp4GptOssExperts) or ( isinstance(module, GptOssExperts) and self.quantization_config.dequantize ): if self.quantization_config.dequantize: # dq_param_name is the name of the parameter without the blocks or scales suffix, it's used in this case since we don't switch linears # so we only have the original param name dq_param_name = param_name[: -len("_blocks")] dequantize(module, param_name, param_value, target_device, dq_param_name, **shard_kwargs) else: load_and_swizzle_mxfp4( module, param_name, param_value, target_device, **shard_kwargs, ) def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): # we are not really dequantizing, we are just removing everything related to quantization here if self.quantization_config.dequantize: self.remove_quantization_config(model) # clean cache due to triton ops if torch.cuda.is_available(): torch.cuda.empty_cache() def update_expected_keys(self, model: "PreTrainedModel", expected_keys: list[str], checkpoint_keys: list[str]): # Replace expected_keys for experts' gate_up_proj and down_proj with their _blocks and _scales variants new_expected_keys = [] for key in expected_keys: if key.endswith(".mlp.experts.gate_up_proj"): base = key[: -len("gate_up_proj")] new_expected_keys.append(base + "gate_up_proj_blocks") new_expected_keys.append(base + "gate_up_proj_scales") elif key.endswith(".mlp.experts.down_proj"): base = key[: -len("down_proj")] new_expected_keys.append(base + "down_proj_blocks") new_expected_keys.append(base + "down_proj_scales") else: new_expected_keys.append(key) return new_expected_keys def _process_model_before_weight_loading( self, model: "PreTrainedModel", keep_in_fp32_modules: Optional[list[str]] = None, **kwargs, ): from ..integrations import replace_with_mxfp4_linear self.modules_to_not_convert = self.get_modules_to_not_convert( model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules ) use_kernels = kwargs.get("use_kernels", False) # if we are using kernels, we can't use the quantized model, since the forward pass is different and needs special handling if use_kernels: logger.warning_once( "You are using full precision kernels, we will dequantize the model to bf16. 
" "To use the quantized model with quantization kernels, please set use_kernels=False" ) self.quantization_config.dequantize = True config = model.config model = replace_with_mxfp4_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, config=config, ) model.config.quantization_config = self.quantization_config def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]: from ..integrations import Mxfp4GptOssExperts not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, Mxfp4GptOssExperts): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def update_tp_plan(self, config): if "GptOssConfig" in config.__class__.__name__: if getattr(config, "base_model_tp_plan", None) is not None: config.base_model_tp_plan.update( { "layers.*.mlp.experts.gate_up_proj_blocks": "grouped_gemm", "layers.*.mlp.experts.gate_up_proj_scales": "grouped_gemm", "layers.*.mlp.experts.down_proj_blocks": "grouped_gemm", "layers.*.mlp.experts.down_proj_scales": "grouped_gemm", } ) return config def update_param_name(self, param_name: str) -> str: if self.quantization_config.dequantize: if "_blocks" in param_name: return param_name.replace("_blocks", "") elif "_scales" in param_name: return param_name.replace("_scales", "") return param_name def is_serializable(self, safe_serialization=None): logger.warning_once("MXFP4 quantization is not serializable using safetensors for now") return False @property def is_trainable(self) -> bool: logger.warning_once( "MXFP4 quantization don't support training, please consider dequantizing the model first by passing quantization_config=Mxfp4Config(dequantize=True) to .from_pretrained()" ) return False
transformers/src/transformers/quantizers/quantizer_mxfp4.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_mxfp4.py", "repo_id": "transformers", "token_count": 7664 }
574
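Everything `Mxfp4HfQuantizer` does is driven by the `Mxfp4Config` attached to the model: passing `dequantize=True` takes the fallback path described above (no triton kernels, plain bf16 weights), which is also what the `is_trainable` warning recommends before fine-tuning. A minimal sketch of that loading call follows; `openai/gpt-oss-20b` is only an illustrative MXFP4-quantized checkpoint, and exact keyword support may differ between transformers versions.

```python
# Minimal sketch (assumed usage): load an MXFP4 checkpoint dequantized to bf16,
# as suggested by Mxfp4HfQuantizer.is_trainable. The model id is illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer, Mxfp4Config

model_id = "openai/gpt-oss-20b"  # assumption: any MXFP4-quantized checkpoint

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=Mxfp4Config(dequantize=True),  # merged over the checkpoint config
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```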
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (host all the user fronting encoding methods) Special token mixing (host the special tokens logic) and BatchEncoding (wrap the dictionary of output with special method for the Fast tokenizers) """ import copy import json import os import re import warnings from collections import UserDict from collections.abc import Mapping, Sequence, Sized from contextlib import contextmanager from dataclasses import dataclass from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, NamedTuple, Optional, Union import numpy as np from packaging import version from . import __version__ from .dynamic_module_utils import custom_object_save from .utils import ( CHAT_TEMPLATE_DIR, CHAT_TEMPLATE_FILE, ExplicitEnum, PaddingStrategy, PushToHubMixin, TensorType, add_end_docstrings, cached_file, copy_func, download_url, extract_commit_hash, is_flax_available, is_jax_tensor, is_mlx_available, is_numpy_array, is_offline_mode, is_protobuf_available, is_remote_url, is_tf_available, is_tf_tensor, is_tokenizers_available, is_torch_available, is_torch_device, is_torch_tensor, list_repo_templates, logging, requires_backends, to_py_obj, ) from .utils.chat_template_utils import render_jinja_template from .utils.import_utils import PROTOBUF_IMPORT_ERROR if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp # noqa: F401 def import_protobuf_decode_error(error_message=""): if is_protobuf_available(): from google.protobuf.message import DecodeError return DecodeError else: raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message)) if is_tokenizers_available(): from tokenizers import AddedToken from tokenizers import Encoding as EncodingFast else: @dataclass(frozen=False, eq=True) class AddedToken: """ AddedToken represents a token to be added to a Tokenizer An AddedToken can have special options defining the way it should behave. The `normalized` will default to `not special` if it is not specified, similarly to the definition in `tokenizers`. 
""" def __init__( self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None ): self.content = content self.single_word = single_word self.lstrip = lstrip self.rstrip = rstrip self.special = special self.normalized = normalized if normalized is not None else not special def __getstate__(self): return self.__dict__ def __str__(self): return self.content @dataclass class EncodingFast: """This is dummy class because without the `tokenizers` library we don't have these objects anyway""" pass logger = logging.get_logger(__name__) VERY_LARGE_INTEGER = int(1e30) # This is used to set the max input length for a model with infinite size input LARGE_INTEGER = int(1e20) # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER # Define type aliases and NamedTuples TextInput = str PreTokenizedInput = list[str] EncodedInput = list[int] TextInputPair = tuple[str, str] PreTokenizedInputPair = tuple[list[str], list[str]] EncodedInputPair = tuple[list[int], list[int]] # Define type aliases for text-related non-text modalities AudioInput = Union["np.ndarray", "torch.Tensor", list["np.ndarray"], list["torch.Tensor"]] # Slow tokenizers used to be saved in three separated files SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" # Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file FULL_TOKENIZER_FILE = "tokenizer.json" _re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json") class TruncationStrategy(ExplicitEnum): """ Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an IDE. """ ONLY_FIRST = "only_first" ONLY_SECOND = "only_second" LONGEST_FIRST = "longest_first" DO_NOT_TRUNCATE = "do_not_truncate" class CharSpan(NamedTuple): """ Character span in the original string. Args: start (`int`): Index of the first character in the original string. end (`int`): Index of the character following the last character in the original string. """ start: int end: int class TokenSpan(NamedTuple): """ Token span in an encoded string (list of tokens). Args: start (`int`): Index of the first token in the span. end (`int`): Index of the token following the last token in the span. """ start: int end: int class BatchEncoding(UserDict): """ Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`], [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc). This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes utility methods to map from word/character space to token space. Args: data (`dict`, *optional*): Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods ('input_ids', 'attention_mask', etc.). encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*): If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character space to token space the `tokenizers.Encoding` instance or list of instance (for batches) hold this information. tensor_type (`Union[None, str, TensorType]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. 
prepend_batch_axis (`bool`, *optional*, defaults to `False`): Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). Note that this parameter has an effect if the parameter `tensor_type` is set, *otherwise has no effect*. n_sequences (`Optional[int]`, *optional*): You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at initialization. """ def __init__( self, data: Optional[dict[str, Any]] = None, encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None, tensor_type: Union[None, str, TensorType] = None, prepend_batch_axis: bool = False, n_sequences: Optional[int] = None, ): super().__init__(data) if isinstance(encoding, EncodingFast): encoding = [encoding] self._encodings = encoding if n_sequences is None and encoding is not None and encoding: n_sequences = encoding[0].n_sequences self._n_sequences = n_sequences self.convert_to_tensors(tensor_type=tensor_type, prepend_batch_axis=prepend_batch_axis) @property def n_sequences(self) -> Optional[int]: """ `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of sentences) """ return self._n_sequences @property def is_fast(self) -> bool: """ `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`] or not. """ return self._encodings is not None def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]: """ If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.). If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`. If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.) with the constraint of slice. """ if isinstance(item, str): return self.data[item] elif self._encodings is not None: return self._encodings[item] elif isinstance(item, slice): return {key: self.data[key][item] for key in self.data} else: raise KeyError( "Invalid key. Only three types of key are available: " "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting." ) def __getattr__(self, item: str): try: return self.data[item] except KeyError: raise AttributeError def __getstate__(self): return {"data": self.data, "encodings": self._encodings} def __setstate__(self, state): if "data" in state: self.data = state["data"] if "encodings" in state: self._encodings = state["encodings"] # After this point: # Extended properties and methods only available for fast (Rust-based) tokenizers # provided by HuggingFace tokenizers library. @property def encodings(self) -> Optional[list[EncodingFast]]: """ `Optional[list[tokenizers.Encoding]]`: The list all encodings from the tokenization process. Returns `None` if the input was tokenized through Python (i.e., not a fast) tokenizer. """ return self._encodings def tokens(self, batch_index: int = 0) -> list[str]: """ Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to integer indices) at a given batch index (only works for the output of a fast tokenizer). Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `list[str]`: The list of tokens at that index. """ if not self._encodings: raise ValueError( "tokens() is not available when using non-fast tokenizers (e.g. 
instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].tokens def sequence_ids(self, batch_index: int = 0) -> list[Optional[int]]: """ Return a list mapping the tokens to the id of their original sentences: - `None` for special tokens added around or between sequences, - `0` for tokens corresponding to words in the first sequence, - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly encoded. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `list[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding sequence. """ if not self._encodings: raise ValueError( "sequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].sequence_ids def words(self, batch_index: int = 0) -> list[Optional[int]]: """ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `list[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word). """ if not self._encodings: raise ValueError( "words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) warnings.warn( "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, " "but more self-explanatory `BatchEncoding.word_ids()` property.", FutureWarning, ) return self.word_ids(batch_index) def word_ids(self, batch_index: int = 0) -> list[Optional[int]]: """ Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer. Args: batch_index (`int`, *optional*, defaults to 0): The index to access in the batch. Returns: `list[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word (several tokens will be mapped to the same word index if they are parts of that word). """ if not self._encodings: raise ValueError( "word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`" " class)." ) return self._encodings[batch_index].word_ids def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the sequence represented by the given token. In the general use case, this method returns `0` for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair Can be called as: - `self.token_to_sequence(token_index)` if batch size is 1 - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (`int`): Index of the sequence in the batch. 
If the batch only comprises one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the sequence. Returns: `int`: Index of the word in the input sequence. """ if not self._encodings: raise ValueError("token_to_sequence() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_sequence(token_index) def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int: """ Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch. Can be called as: - `self.token_to_word(token_index)` if batch size is 1 - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e., words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_token_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the sequence. Returns: `int`: Index of the word in the input sequence. """ if not self._encodings: raise ValueError("token_to_word() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index if batch_index < 0: batch_index = self._batch_size + batch_index if token_index < 0: token_index = self._seq_len + token_index return self._encodings[batch_index].token_to_word(token_index) def word_to_tokens( self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 ) -> Optional[TokenSpan]: """ Get the encoded token span corresponding to a word in a sequence of the batch. Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with: - **start** -- Index of the first token. - **end** -- Index of the token following the last token. Can be called as: - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1 - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_word_index (`int`): Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of the word in the sequence. word_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to. Returns: ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. 
Returns `None` if no tokens correspond to the word. This can happen especially when the token is a special token that has been used to format the tokenization. For example when we add a class token at the very beginning of the tokenization. """ if not self._encodings: raise ValueError("word_to_tokens() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index if batch_index < 0: batch_index = self._batch_size + batch_index if word_index < 0: word_index = self._seq_len + word_index span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index) return TokenSpan(*span) if span is not None else None def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> Optional[CharSpan]: """ Get the character span corresponding to an encoded token in a sequence of the batch. Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with: - **start** -- Index of the first character in the original string associated to the token. - **end** -- Index of the character following the last character in the original string associated to the token. Can be called as: - `self.token_to_chars(token_index)` if batch size is 1 - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1 Args: batch_or_token_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the token in the sequence. token_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in the sequence. Returns: [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token (e.g. <s>, </s>) doesn't correspond to any chars in the origin string. """ if not self._encodings: raise ValueError("token_to_chars() is not available when using Python based tokenizers") if token_index is not None: batch_index = batch_or_token_index else: batch_index = 0 token_index = batch_or_token_index span_indices = self._encodings[batch_index].token_to_chars(token_index) return CharSpan(*span_indices) if span_indices is not None else None def char_to_token( self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0 ) -> int: """ Get the index of the token in the encoded output comprising a character in the original string for a sequence of the batch. Can be called as: - `self.char_to_token(char_index)` if batch size is 1 - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence char_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. Returns: `int`: Index of the token, or None if the char index refers to a whitespace only token and whitespace is trimmed with `trim_offsets=True`. 
""" if not self._encodings: raise ValueError("char_to_token() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_token(char_index, sequence_index) def word_to_chars( self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0 ) -> CharSpan: """ Get the character span in the original string corresponding to given word in a sequence of the batch. Character spans are returned as a CharSpan NamedTuple with: - start: index of the first character in the original string - end: index of the character following the last character in the original string Can be called as: - `self.word_to_chars(word_index)` if batch size is 1 - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1 Args: batch_or_word_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the word in the sequence word_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the sequence. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided word index belongs to. Returns: `CharSpan` or `list[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan are NamedTuple with: - start: index of the first character associated to the token in the original string - end: index of the character following the last character associated to the token in the original string """ if not self._encodings: raise ValueError("word_to_chars() is not available when using Python based tokenizers") if word_index is not None: batch_index = batch_or_word_index else: batch_index = 0 word_index = batch_or_word_index return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index))) def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0) -> int: """ Get the word in the original string corresponding to a character in the original string of a sequence of the batch. Can be called as: - `self.char_to_word(char_index)` if batch size is 1 - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1 This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized words. Args: batch_or_char_index (`int`): Index of the sequence in the batch. If the batch only comprise one sequence, this can be the index of the character in the original string. char_index (`int`, *optional*): If a batch index is provided in *batch_or_token_index*, this can be the index of the character in the original string. sequence_index (`int`, *optional*, defaults to 0): If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0 or 1) the provided character index belongs to. Returns: `int` or `list[int]`: Index or indices of the associated encoded token(s). 
""" if not self._encodings: raise ValueError("char_to_word() is not available when using Python based tokenizers") if char_index is not None: batch_index = batch_or_char_index else: batch_index = 0 char_index = batch_or_char_index return self._encodings[batch_index].char_to_word(char_index, sequence_index) def convert_to_tensors( self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False ): """ Convert the inner content to tensors. Args: tensor_type (`str` or [`~utils.TensorType`], *optional*): The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If `None`, no modification is done. prepend_batch_axis (`int`, *optional*, defaults to `False`): Whether or not to add the batch dimension during the conversion. """ if tensor_type is None: return self # Convert to TensorType if not isinstance(tensor_type, TensorType): tensor_type = TensorType(tensor_type) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf as_tensor = tf.constant is_tensor = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch is_tensor = torch.is_tensor def as_tensor(value, dtype=None): if isinstance(value, list) and isinstance(value[0], np.ndarray): return torch.from_numpy(np.array(value)) return torch.tensor(value) elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 as_tensor = jnp.array is_tensor = is_jax_tensor elif tensor_type == TensorType.MLX: if not is_mlx_available(): raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.") import mlx.core as mx as_tensor = mx.array def is_tensor(obj): return isinstance(obj, mx.array) else: def as_tensor(value, dtype=None): if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)): value_lens = [len(val) for val in value] if len(set(value_lens)) > 1 and dtype is None: # we have a ragged list so handle explicitly value = as_tensor([np.asarray(val) for val in value], dtype=object) return np.asarray(value, dtype=dtype) is_tensor = is_numpy_array # Do the tensor conversion in batch for key, value in self.items(): try: if prepend_batch_axis: value = [value] if not is_tensor(value): tensor = as_tensor(value) # Removing this for now in favor of controlling the shape with `prepend_batch_axis` # # at-least2d # if tensor.ndim > 2: # tensor = tensor.squeeze(0) # elif tensor.ndim < 2: # tensor = tensor[None, :] self[key] = tensor except Exception as e: if key == "overflowing_tokens": raise ValueError( "Unable to create tensor returning overflowing tokens of different lengths. " "Please see if a fast version of this tokenizer is available to have this feature available." ) from e raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding with" " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your" f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is" " expected)." 
) from e return self def to(self, device: Union[str, "torch.device"], *, non_blocking: bool = False) -> "BatchEncoding": """ Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only). Args: device (`str` or `torch.device`): The device to put the tensors on. non_blocking (`bool`): Whether to perform the copy asynchronously. Returns: [`BatchEncoding`]: The same instance after modification. """ requires_backends(self, ["torch"]) # This check catches things like APEX blindly calling "to" on all inputs to a module # Otherwise it passes the casts down and casts the LongTensor containing the token idxs # into a HalfTensor if isinstance(device, str) or is_torch_device(device) or isinstance(device, int): self.data = { k: v.to(device=device, non_blocking=non_blocking) if hasattr(v, "to") and callable(v.to) else v for k, v in self.data.items() } else: logger.warning(f"Attempting to cast a BatchEncoding to type {str(device)}. This is not supported.") return self class SpecialTokensMixin: """ A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to special tokens. In particular, this class hold the attributes which can be used to directly access these special tokens in a model-independent manner and allow to set and update the special tokens. Args: bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be skipped when decoding if `skip_special_tokens` is set to `True`. """ SPECIAL_TOKENS_ATTRIBUTES = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", "additional_special_tokens", ] def __init__(self, verbose=False, **kwargs): self._pad_token_type_id = 0 self.verbose = verbose self._special_tokens_map = dict.fromkeys(self.SPECIAL_TOKENS_ATTRIBUTES) self._special_tokens_map["additional_special_tokens"] = [] # for BC where it defaults to empty list # We directly set the hidden value to allow initialization with special tokens # which are not yet in the vocabulary. 
Necessary for serialization/de-serialization # TODO clean this up at some point (probably by switching to fast tokenizers) for key, value in kwargs.items(): if value is None: continue if key in self.SPECIAL_TOKENS_ATTRIBUTES: if key == "additional_special_tokens": assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple" assert all(isinstance(t, (str, AddedToken)) for t in value), ( "One of the tokens is not a string or an AddedToken" ) setattr(self, key, value) elif isinstance(value, (str, AddedToken)): setattr(self, key, value) else: raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}") def sanitize_special_tokens(self) -> int: """ The `sanitize_special_tokens` is now deprecated kept for backward compatibility and will be removed in transformers v5. """ logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.") return self.add_tokens(self.all_special_tokens_extended, special_tokens=True) def add_special_tokens( self, special_tokens_dict: dict[str, Union[str, AddedToken, Sequence[Union[str, AddedToken]]]], replace_additional_special_tokens=True, ) -> int: """ Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the current vocabulary). When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Using `add_special_tokens` will ensure your special tokens can be used in several ways: - Special tokens can be skipped when decoding using `skip_special_tokens = True`. - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`. - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts. When possible, special tokens are already registered for provided pretrained models (for instance [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be `'</s>'`). Args: special_tokens_dict (dictionary *str* to *str*, `tokenizers.AddedToken`, or `Sequence[Union[str, AddedToken]]`): Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`]. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the `unk_token` to them). replace_additional_special_tokens (`bool`, *optional*,, defaults to `True`): If `True`, the existing list of additional special tokens will be replaced by the list provided in `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just extended. In the former case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous `additional_special_tokens` are still added tokens, and will not be split by the model. Returns: `int`: Number of tokens added to the vocabulary. 
Examples: ```python # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2Model.from_pretrained("openai-community/gpt2") special_tokens_dict = {"cls_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ```""" if not special_tokens_dict: return 0 added_tokens = [] for key, value in special_tokens_dict.items(): assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token" if self.verbose: logger.info(f"Assigning {value} to the {key} key of the tokenizer") if key == "additional_special_tokens": assert isinstance(value, (list, tuple)) and all(isinstance(t, (str, AddedToken)) for t in value), ( f"Tokens {value} for key {key} should all be str or AddedToken instances" ) to_add = [] for token in value: if isinstance(token, str): # for legacy purpose we default to stripping. `test_add_tokens_tokenizer` depends on this token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True) if not replace_additional_special_tokens and str(token) in self.additional_special_tokens: continue to_add.append(token) if replace_additional_special_tokens and len(to_add) > 0: setattr(self, key, list(to_add)) else: self._special_tokens_map["additional_special_tokens"].extend(to_add) added_tokens += to_add else: if not isinstance(value, (str, AddedToken)): raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance") if isinstance(value, (str)): # for legacy purpose we default to stripping. `False` depends on this value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True) if isinstance(value, AddedToken): setattr(self, key, value) if value not in added_tokens: added_tokens.append(value) # if we are adding tokens that were not part of the vocab, we ought to add them added_tokens = self.add_tokens(added_tokens, special_tokens=True) return added_tokens def add_tokens( self, new_tokens: Union[str, AddedToken, Sequence[Union[str, AddedToken]]], special_tokens: bool = False ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. Args: new_tokens (`str`, `tokenizers.AddedToken` or a sequence of *str* or `tokenizers.AddedToken`): Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string token to let you personalize its behavior: whether this token should only match against a single word, whether this token should strip all potential whitespaces on the left side, whether this token should strip all potential whitespaces on the right side, etc. 
special_tokens (`bool`, *optional*, defaults to `False`): Can be used to specify if the token is a special token. This mostly change the normalization behavior (special tokens like CLS or [MASK] are usually not lower-cased for instance). See details for `tokenizers.AddedToken` in HuggingFace tokenizers library. Returns: `int`: Number of tokens added to the vocabulary. Examples: ```python # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased") model = BertModel.from_pretrained("google-bert/bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) ```""" if not new_tokens: return 0 if not isinstance(new_tokens, (list, tuple)): new_tokens = [new_tokens] return self._add_tokens(new_tokens, special_tokens=special_tokens) def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int: raise NotImplementedError @property def pad_token_type_id(self) -> int: """ `int`: Id of the padding token type in the vocabulary. """ return self._pad_token_type_id def __setattr__(self, key, value): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4] if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): if key_is_special_id: if value is not None: value = ( self.convert_ids_to_tokens(value) if key != "additional_special_tokens" else [self.convert_ids_to_tokens(val) for val in value] ) key = key_without_id if key != "additional_special_tokens" and not isinstance(value, (str, AddedToken)) and value is not None: raise ValueError(f"Cannot set a non-string value as the {key}") self._special_tokens_map[key] = value else: super().__setattr__(key, value) def __getattr__(self, key): key_without_id = key key_is_special_id = key.endswith("_id") or key.endswith("_ids") if key_is_special_id: key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4] if self.__dict__.get("_special_tokens_map", None) is not None and any( name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id] ): _special_tokens_map = self.__dict__["_special_tokens_map"] if not key_is_special_id: if _special_tokens_map[key] is None: if self.verbose: logger.error(f"Using {key}, but it is not set yet.") return None value = _special_tokens_map[key] return str(value) if key != "additional_special_tokens" else [str(tok) for tok in value] else: attr_as_tokens = getattr(self, key_without_id) return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None if key not in self.__dict__: raise AttributeError(f"{self.__class__.__name__} has no attribute {key}") else: return super().__getattr__(key) @property def special_tokens_map(self) -> dict[str, Union[str, list[str]]]: """ `dict[str, Union[str, list[str]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Convert potential tokens of `tokenizers.AddedToken` type to string. 
""" set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = getattr(self, attr) if attr_value: set_attr[attr] = attr_value return set_attr @property def special_tokens_map_extended(self) -> dict[str, Union[str, AddedToken, list[Union[str, AddedToken]]]]: """ `dict[str, Union[str, tokenizers.AddedToken, list[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.). Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ set_attr = {} for attr in self.SPECIAL_TOKENS_ATTRIBUTES: attr_value = self._special_tokens_map[attr] if attr_value: set_attr[attr] = attr_value return set_attr @property def all_special_tokens_extended(self) -> list[Union[str, AddedToken]]: """ `list[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has nothing to do with the index of each tokens. If you want to know the correct indices, check `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`. Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how special tokens are tokenized. """ all_tokens = [] seen = set() for value in self.special_tokens_map_extended.values(): if isinstance(value, (list, tuple)): tokens_to_add = [token for token in value if str(token) not in seen] else: tokens_to_add = [value] if str(value) not in seen else [] seen.update(map(str, tokens_to_add)) all_tokens.extend(tokens_to_add) return all_tokens @property def all_special_tokens(self) -> list[str]: """ `list[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.). Convert tokens of `tokenizers.AddedToken` type to string. """ all_toks = [str(s) for s in self.all_special_tokens_extended] return all_toks @property def all_special_ids(self) -> list[int]: """ `list[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes. """ all_toks = self.all_special_tokens all_ids = self.convert_tokens_to_ids(all_toks) return all_ids def _set_model_specific_special_tokens(self, special_tokens: list[str]): """ Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part of "self.special_tokens" and saved as a special token in tokenizer's config. This allows us to dynamically add new model-type specific tokens after initializing the tokenizer. For example: if the model tokenizers is multimodal, we can support special image or audio tokens. """ self.SPECIAL_TOKENS_ATTRIBUTES = self.SPECIAL_TOKENS_ATTRIBUTES + list(special_tokens.keys()) for key, value in special_tokens.items(): if isinstance(value, (str, AddedToken)): self._special_tokens_map[key] = value else: raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}") ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens automatically. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. 
Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. is_split_into_words (`bool`, *optional*, defaults to `False`): Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. 
""" ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`) """ INIT_TOKENIZER_DOCSTRING = r""" Class attributes (overridden by derived classes) - **vocab_files_names** (`dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string). - **pretrained_vocab_files_map** (`dict[str, dict[str, str]]`) -- A dictionary of dictionaries, with the high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the associated pretrained vocabulary file. 
- **model_input_names** (`list[str]`) -- A list of inputs expected in the forward pass of the model. - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied. Should be `'right'` or `'left'`. - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation applied. Should be `'right'` or `'left'`. Args: model_max_length (`int`, *optional*): The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will default to VERY_LARGE_INTEGER (`int(1e30)`). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. truncation_side (`str`, *optional*): The side on which the model should have truncation applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. chat_template (`str`, *optional*): A Jinja template string that will be used to format lists of chat messages. See https://huggingface.co/docs/transformers/chat_templating for a full description. model_input_names (`list[string]`, *optional*): The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or `"attention_mask"`). Default value is picked from the class attribute of the same name. bos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and `self.bos_token_id`. eos_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the end of a sentence. Will be associated to `self.eos_token` and `self.eos_token_id`. unk_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and `self.unk_token_id`. sep_token (`str` or `tokenizers.AddedToken`, *optional*): A special token separating two different sentences in the same input (used by BERT for instance). Will be associated to `self.sep_token` and `self.sep_token_id`. pad_token (`str` or `tokenizers.AddedToken`, *optional*): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`. cls_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing the class of the input (used by BERT for instance). Will be associated to `self.cls_token` and `self.cls_token_id`. mask_token (`str` or `tokenizers.AddedToken`, *optional*): A special token representing a masked token (used by masked-language modeling pretraining objectives, like BERT). Will be associated to `self.mask_token` and `self.mask_token_id`. additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*): A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding with `skip_special_tokens` is set to True. If they are not part of the vocabulary, they will be added at the end of the vocabulary. 
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not the model should clean up the spaces that were added when splitting the input text
                during the tokenization process.
            split_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the special tokens should be split during the tokenization process. Passing
                `split_special_tokens=True` will affect the internal state of the tokenizer. The default behavior is
                to not split special tokens. This means that if `<s>` is the `bos_token`, then
                `tokenizer.tokenize("<s>") == ['<s>']`. Otherwise, if `split_special_tokens=True`, then
                `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
"""


@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
    """
    Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].

    Handles shared (mostly boilerplate) methods for those two classes.
    """

    vocab_files_names: dict[str, str] = {}
    pretrained_vocab_files_map: dict[str, dict[str, str]] = {}
    _auto_class: Optional[str] = None

    # first name has to correspond to main model input name
    # to make sure `tokenizer.pad(...)` works correctly
    model_input_names: list[str] = ["input_ids", "token_type_ids", "attention_mask"]
    padding_side: str = "right"
    truncation_side: str = "right"
    slow_tokenizer_class = None

    def __init__(self, **kwargs):
        # inputs and kwargs for saving and re-loading (see ``from_pretrained`` and ``save_pretrained``)
        self.init_inputs = ()
        for key in kwargs:
            if hasattr(self, key) and callable(getattr(self, key)):
                raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}")

        self.init_kwargs = copy.deepcopy(kwargs)
        self.name_or_path = kwargs.pop("name_or_path", "")
        self._processor_class = kwargs.pop("processor_class", None)

        # For backward compatibility we fallback to set model_max_length from max_len if provided
        model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
        self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER

        # Padding and truncation side are right by default and overridden in subclasses. If specified in the kwargs,
        # it is changed.
        self.padding_side = kwargs.pop("padding_side", self.padding_side)
        if self.padding_side not in ["right", "left"]:
            raise ValueError(
                f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
            )

        self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
        if self.truncation_side not in ["right", "left"]:
            raise ValueError(
                f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
            )

        self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)

        # By default, do not clean up tokenization spaces for both fast and slow tokenizers
        self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)

        # By default, do not split special tokens for both fast and slow tokenizers
        self.split_special_tokens = kwargs.pop("split_special_tokens", False)

        self.deprecation_warnings = {}  # Used to store when we have already noticed a deprecation warning (avoid overlogging).
        self._in_target_context_manager = False

        # Stores a Jinja template that formats chat histories into tokenizable strings
        self.chat_template = kwargs.pop("chat_template", None)
        if isinstance(self.chat_template, (list, tuple)):
            # Chat templates are stored as lists of dicts with fixed key names,
            # we reconstruct that into a single dict while loading them.
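            # e.g. [{"name": "default", "template": "..."}, {"name": "tool_use", "template": "..."}]
            # becomes {"default": "...", "tool_use": "..."}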
self.chat_template = {template["name"]: template["template"] for template in self.chat_template} super().__init__(**kwargs) self.extra_special_tokens = kwargs.pop("extra_special_tokens", {}) self._set_model_specific_special_tokens(special_tokens=self.extra_special_tokens) @property def max_len_single_sentence(self) -> int: """ `int`: The maximum length of a sentence that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=False) @property def max_len_sentences_pair(self) -> int: """ `int`: The maximum combined length of a pair of sentences that can be fed to the model. """ return self.model_max_length - self.num_special_tokens_to_add(pair=True) @max_len_single_sentence.setter def max_len_single_sentence(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_single_sentence'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose: if not self.deprecation_warnings.get("max_len_single_sentence", False): logger.warning( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_single_sentence"] = True else: raise ValueError( "Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up." ) @max_len_sentences_pair.setter def max_len_sentences_pair(self, value) -> int: # For backward compatibility, allow to try to setup 'max_len_sentences_pair'. if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose: if not self.deprecation_warnings.get("max_len_sentences_pair", False): logger.warning( "Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up." ) self.deprecation_warnings["max_len_sentences_pair"] = True else: raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.") def _set_processor_class(self, processor_class: str): """Sets processor class as an attribute.""" self._processor_class = processor_class @property def added_tokens_decoder(self) -> dict[int, AddedToken]: raise NotImplementedError() def __repr__(self) -> str: added_tokens_decoder_rep = "\n\t".join([f"{k}: {v.__repr__()}," for k, v in self.added_tokens_decoder.items()]) return ( f"{self.__class__.__name__}(name_or_path='{self.name_or_path}'," f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}," f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}'," f" special_tokens={self.special_tokens_map}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces}," " added_tokens_decoder={\n\t" + added_tokens_decoder_rep + "\n}\n)" ) def __len__(self) -> int: raise NotImplementedError() def get_vocab(self) -> dict[str, int]: """ Returns the vocabulary as a dictionary of token to index. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. Returns: `dict[str, int]`: The vocabulary. 
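        Example (a minimal sketch, assuming the `google-bert/bert-base-uncased` checkpoint is reachable; any
        tokenizer behaves the same way):

        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        vocab = tokenizer.get_vocab()
        assert vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")
        ```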
""" raise NotImplementedError() def apply_chat_template( self, conversation: Union[list[dict[str, str]], list[list[dict[str, str]]]], tools: Optional[list[Union[dict, Callable]]] = None, documents: Optional[list[dict[str, str]]] = None, chat_template: Optional[str] = None, add_generation_prompt: bool = False, continue_final_message: bool = False, tokenize: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: bool = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_dict: bool = False, return_assistant_tokens_mask: bool = False, tokenizer_kwargs: Optional[dict[str, Any]] = None, **kwargs, ) -> Union[str, list[int], list[str], list[list[int]], BatchEncoding]: """ Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to determine the format and control tokens to use when converting. Args: conversation (Union[list[dict[str, str]], list[list[dict[str, str]]]]): A list of dicts with "role" and "content" keys, representing the chat history so far. tools (`list[Union[Dict, Callable]]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information. documents (`list[dict[str, str]]`, *optional*): A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing "title" and "text" keys. Please see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG) for examples of passing documents with chat templates. chat_template (`str`, *optional*): A Jinja template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. add_generation_prompt (bool, *optional*): If this is set, a prompt with the token(s) that indicate the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model. Note that this argument will be passed to the chat template, and so it must be supported in the template for this argument to have any effect. continue_final_message (bool, *optional*): If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to "prefill" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. tokenize (`bool`, defaults to `True`): Whether to tokenize the output. If `False`, the output will be a string. 
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, defaults to `False`): Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. max_length (`int`, *optional*): Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If not specified, the tokenizer's `max_length` attribute will be used as a default. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable values are: - `'tf'`: Return TensorFlow `tf.Tensor` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. return_dict (`bool`, defaults to `False`): Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. tokenizer_kwargs (`dict[str: Any]`, *optional*): Additional kwargs to pass to the tokenizer. return_assistant_tokens_mask (`bool`, defaults to `False`): Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant, the mask will contain 1. For user and system tokens, the mask will contain 0. This functionality is only available for chat templates that support it via the `{% generation %}` keyword. **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template. Returns: `Union[list[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is set, will return a dict of tokenizer outputs instead. """ if return_dict and not tokenize: raise ValueError( "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict " "of tokenizer outputs to return." ) if return_assistant_tokens_mask and not return_dict: raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`") if tokenizer_kwargs is None: tokenizer_kwargs = {} chat_template = self.get_chat_template(chat_template, tools) if isinstance(conversation, (list, tuple)) and ( isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages") ): conversations = conversation is_batched = True else: conversations = [conversation] is_batched = False if continue_final_message: if add_generation_prompt: raise ValueError( "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead." 
) if return_assistant_tokens_mask: raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.") template_kwargs = {**self.special_tokens_map, **kwargs} # kwargs overwrite special tokens if both are present rendered_chat, generation_indices = render_jinja_template( conversations=conversations, tools=tools, documents=documents, chat_template=chat_template, return_assistant_tokens_mask=return_assistant_tokens_mask, continue_final_message=continue_final_message, add_generation_prompt=add_generation_prompt, **template_kwargs, ) if not is_batched: rendered_chat = rendered_chat[0] if tokenize: out = self( rendered_chat, padding=padding, truncation=truncation, max_length=max_length, add_special_tokens=False, return_tensors=return_tensors, **tokenizer_kwargs, ) if return_dict: if return_assistant_tokens_mask: assistant_masks = [] if is_batched or return_tensors: input_ids = out["input_ids"] else: input_ids = [out["input_ids"]] for i in range(len(input_ids)): current_mask = [0] * len(input_ids[i]) for assistant_start_char, assistant_end_char in generation_indices[i]: start_token = out.char_to_token(i, assistant_start_char) end_token = out.char_to_token(i, assistant_end_char - 1) if start_token is None: # start_token is out of bounds maybe due to truncation. break for token_id in range(start_token, end_token + 1 if end_token else len(input_ids[i])): current_mask[token_id] = 1 assistant_masks.append(current_mask) if not is_batched and not return_tensors: assistant_masks = assistant_masks[0] out["assistant_masks"] = assistant_masks if return_tensors: out.convert_to_tensors(tensor_type=return_tensors) return out else: return out["input_ids"] else: return rendered_chat def encode_message_with_chat_template( self, message: dict[str, str], conversation_history: Optional[list[dict[str, str]]] = None, **kwargs, ) -> list[int]: """ Tokenize a single message. This method is a convenience wrapper around `apply_chat_template` that allows you to tokenize messages one by one. This is useful for things like token-by-token streaming. This method is not guaranteed to be perfect. For some models, it may be impossible to robustly tokenize single messages. For example, if the chat template adds tokens after each message, but also has a prefix that is added to the entire chat, it will be impossible to distinguish a chat-start-token from a message-start-token. In these cases, this method will do its best to find the correct tokenization, but it may not be perfect. **Note:** This method does not support `add_generation_prompt`. If you want to add a generation prompt, you should do it separately after tokenizing the conversation. Args: message (`dict`): A dictionary with "role" and "content" keys, representing the message to tokenize. conversation_history (`list[dict]`, *optional*): A list of dicts with "role" and "content" keys, representing the chat history so far. If you are tokenizing messages one by one, you should pass the previous messages in the conversation here. **kwargs: Additional kwargs to pass to the `apply_chat_template` method. Returns: `list[int]`: A list of token ids representing the tokenized message. """ if "add_generation_prompt" in kwargs: raise ValueError( "`encode_message_with_chat_template` does not support `add_generation_prompt`. Please add the generation prompt " "separately." 
) if conversation_history is None or len(conversation_history) == 0: return self.apply_chat_template([message], add_generation_prompt=False, tokenize=True, **kwargs) conversation = conversation_history + [message] tokens = self.apply_chat_template(conversation, add_generation_prompt=False, tokenize=True, **kwargs) prefix_tokens = self.apply_chat_template( conversation_history, add_generation_prompt=False, tokenize=True, **kwargs ) # It's possible that the prefix tokens are not a prefix of the full list of tokens. # For example, if the prefix is `<s>User: Hi` and the full conversation is `<s>User: Hi</s><s>Assistant: Hello`. # In this case, we can't simply find the prefix, so we have to do something a bit more subtle. # We look for the first place where the tokens differ, and that's our split point. # This is not perfect, but it's the best we can do without a token-level API. # To make this more robust, we could do a diff and find the longest common subsequence, but this is # a good first approximation. # This is particularly important for models like Llama3 that have changed their chat template to include # EOS tokens after user messages. min_len = min(len(prefix_tokens), len(tokens)) for i in range(min_len): if prefix_tokens[i] != tokens[i]: return tokens[i:] return tokens[min_len:] def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[list[dict]] = None) -> str: """ Retrieve the chat template string used for tokenizing chat messages. This template is used internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat template for better generation tracking. Args: chat_template (`str`, *optional*): A Jinja template or the name of a template to use for this conversion. It is usually not necessary to pass anything to this argument, as the model's template will be used by default. tools (`list[Dict]`, *optional*): A list of tools (callable functions) that will be accessible to the model. If the template does not support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema, giving the name, description and argument types for the tool. See our [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use) for more information. Returns: `str`: The chat template string. """ # First, handle the cases when the model has a dict of multiple templates if isinstance(self.chat_template, dict): template_dict = self.chat_template if chat_template is not None and chat_template in template_dict: # The user can pass the name of a template to the chat template argument instead of an entire template chat_template = template_dict[chat_template] elif chat_template is None: if tools is not None and "tool_use" in template_dict: chat_template = template_dict["tool_use"] elif "default" in template_dict: chat_template = template_dict["default"] else: raise ValueError( "This model has multiple chat templates with no default specified! Please either pass a chat " "template or the name of the template you wish to use to the `chat_template` argument. Available " f"template names are {sorted(template_dict.keys())}." 
) elif chat_template is None: # These are the cases when the model has a single template # priority: `chat_template` argument > `tokenizer.chat_template` if self.chat_template is not None: chat_template = self.chat_template else: raise ValueError( "Cannot use chat template functions because tokenizer.chat_template is not set and no template " "argument was passed! For information about writing templates and setting the " "tokenizer.chat_template attribute, please see the documentation at " "https://huggingface.co/docs/transformers/main/en/chat_templating" ) return chat_template @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", trust_remote_code=False, **kwargs, ): r""" Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined tokenizer. Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g., `./my_model_directory/`. - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g., `./my_model_directory/vocab.txt`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download the vocabulary files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. subfolder (`str`, *optional*): In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for facebook/rag-token-base), specify it here. inputs (additional positional arguments, *optional*): Will be passed along to the Tokenizer `__init__` method. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. 
This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs (additional keyword arguments, *optional*): Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`. See parameters in the `__init__` for more details. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Examples: ```python # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*) tokenizer = BertTokenizer.from_pretrained("./test/saved_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt") # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>" ```""" resume_download = kwargs.pop("resume_download", None) proxies = kwargs.pop("proxies", None) use_auth_token = kwargs.pop("use_auth_token", None) subfolder = kwargs.pop("subfolder", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) commit_hash = kwargs.pop("_commit_hash", None) gguf_file = kwargs.get("gguf_file") if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) vocab_files = {} init_configuration = {} is_local = os.path.isdir(pretrained_model_name_or_path) single_file_id = None if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1 and not gguf_file: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " "supported for this tokenizer. Use a model identifier or the path to a directory instead." ) warnings.warn( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and " "won't be possible anymore in v5. 
Use a model identifier or the path to a directory instead.", FutureWarning, ) file_id = list(cls.vocab_files_names.keys())[0] vocab_files[file_id] = pretrained_model_name_or_path single_file_id = file_id else: if gguf_file: vocab_files["vocab_file"] = gguf_file else: # At this point pretrained_model_name_or_path is either a directory or a model identifier name additional_files_names = { "added_tokens_file": ADDED_TOKENS_FILE, # kept only for legacy "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, # kept only for legacy "tokenizer_config_file": TOKENIZER_CONFIG_FILE, # tokenizer_file used to initialize a slow from a fast. Properly copy the `addedTokens` instead of adding in random orders "tokenizer_file": FULL_TOKENIZER_FILE, "chat_template_file": CHAT_TEMPLATE_FILE, } vocab_files = {**cls.vocab_files_names, **additional_files_names} if "tokenizer_file" in vocab_files: # Try to get the tokenizer config to see if there are versioned tokenizer files. fast_tokenizer_file = FULL_TOKENIZER_FILE try: resolved_config_file = cached_file( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, user_agent=user_agent, _raise_exceptions_for_missing_entries=False, _commit_hash=commit_hash, ) except OSError: # Re-raise any error raised by cached_file in order to get a helpful error message raise except Exception: # For any other exception, we throw a generic error. raise OSError( f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing all relevant files for a {cls.__name__} tokenizer." 
) commit_hash = extract_commit_hash(resolved_config_file, commit_hash) if resolved_config_file is not None: with open(resolved_config_file, encoding="utf-8") as reader: tokenizer_config = json.load(reader) if "fast_tokenizer_files" in tokenizer_config: fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"]) vocab_files["tokenizer_file"] = fast_tokenizer_file # This block looks for any extra chat template files if is_local: template_dir = Path(pretrained_model_name_or_path, CHAT_TEMPLATE_DIR) if template_dir.is_dir(): for template_file in template_dir.glob("*.jinja"): template_name = template_file.name.removesuffix(".jinja") vocab_files[f"chat_template_{template_name}"] = ( f"{CHAT_TEMPLATE_DIR}/{template_file.name}" ) else: for template in list_repo_templates( pretrained_model_name_or_path, local_files_only=local_files_only, revision=revision, cache_dir=cache_dir, ): vocab_files[f"chat_template_{template}"] = f"{CHAT_TEMPLATE_DIR}/{template}.jinja" # Get files from url, cache, or disk depending on the case resolved_vocab_files = {} for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None elif single_file_id == file_id: if os.path.isfile(file_path): resolved_vocab_files[file_id] = file_path elif is_remote_url(file_path): resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) else: try: resolved_vocab_files[file_id] = cached_file( pretrained_model_name_or_path, file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, _commit_hash=commit_hash, ) except OSError: # Re-raise any error raised by cached_file in order to get a helpful error message raise except Exception: # For any other exception, we throw a generic error. raise OSError( f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from " "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing all relevant files for a {cls.__name__} tokenizer." ) commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash) for file_id, file_path in vocab_files.items(): if file_id not in resolved_vocab_files: continue if is_local: logger.info(f"loading file {file_path}") else: logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}") return cls._from_pretrained( resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=commit_hash, _is_local=is_local, trust_remote_code=trust_remote_code, **kwargs, ) @classmethod def _from_pretrained( cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, token=None, cache_dir=None, local_files_only=False, _commit_hash=None, _is_local=False, trust_remote_code=False, **kwargs, ): # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json # file or if `from_slow` is set to True. 
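        # For example, passing `from_slow=True` through `from_pretrained(..., from_slow=True)` reaches this path and
        # forces the slow -> fast conversion below even when a serialized `tokenizer.json` is available.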
from_slow = kwargs.get("from_slow", False) gguf_file = kwargs.get("gguf_file") has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be # loaded directly from the GGUF file. if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file: slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained( copy.deepcopy(resolved_vocab_files), pretrained_model_name_or_path, copy.deepcopy(init_configuration), *init_inputs, token=token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=_commit_hash, **(copy.deepcopy(kwargs)), ) else: slow_tokenizer = None # Prepare tokenizer initialization kwargs # Did we saved some inputs and kwargs to reload ? tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None) if tokenizer_config_file is not None: with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle: init_kwargs = json.load(tokenizer_config_handle) # First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers. config_tokenizer_class = init_kwargs.get("tokenizer_class") init_kwargs.pop("tokenizer_class", None) if not has_tokenizer_file: init_kwargs.pop("tokenizer_file", None) saved_init_inputs = init_kwargs.pop("init_inputs", ()) if not init_inputs: init_inputs = saved_init_inputs else: config_tokenizer_class = None init_kwargs = init_configuration # If independent chat template file(s) exist, they take priority over template entries in the tokenizer config chat_templates = {} chat_template_file = resolved_vocab_files.pop("chat_template_file", None) extra_chat_templates = [key for key in resolved_vocab_files if key.startswith("chat_template_")] if chat_template_file is not None: with open(chat_template_file, encoding="utf-8") as chat_template_handle: chat_templates["default"] = chat_template_handle.read() for extra_chat_template in extra_chat_templates: template_file = resolved_vocab_files.pop(extra_chat_template, None) if template_file is None: continue # I think this should never happen, but just in case template_name = extra_chat_template.removeprefix("chat_template_") with open(template_file) as chat_template_handle: chat_templates[template_name] = chat_template_handle.read() if len(chat_templates) == 1 and "default" in chat_templates: init_kwargs["chat_template"] = chat_templates["default"] elif chat_templates: init_kwargs["chat_template"] = chat_templates if not _is_local: if "auto_map" in init_kwargs: # For backward compatibility with odl format. if isinstance(init_kwargs["auto_map"], (tuple, list)): init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]} if config_tokenizer_class is None: # Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo. # If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with # AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain. # Maybe we can just remove this entirely? from .models.auto.configuration_auto import AutoConfig # tests_ignore # Second attempt. If we have not yet found tokenizer_class, let's try to use the config. 
try: config = AutoConfig.from_pretrained( pretrained_model_name_or_path, token=token, cache_dir=cache_dir, local_files_only=local_files_only, trust_remote_code=trust_remote_code, _commit_hash=_commit_hash, ) config_tokenizer_class = config.tokenizer_class except (OSError, ValueError, KeyError): # skip if an error occurred. config = None if config_tokenizer_class is None: # Third attempt. If we have not yet found the original type of the tokenizer, # we are loading we see if we can infer it from the type of the configuration file from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore if hasattr(config, "model_type"): model_type = config.model_type else: # Fallback: use pattern matching on the string. model_type = None for pattern in TOKENIZER_MAPPING_NAMES: if pattern in str(pretrained_model_name_or_path): model_type = pattern break if model_type is not None: config_tokenizer_class, config_tokenizer_class_fast = TOKENIZER_MAPPING_NAMES.get( model_type, (None, None) ) if config_tokenizer_class is None: config_tokenizer_class = config_tokenizer_class_fast if config_tokenizer_class is not None: if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""): logger.warning( "The tokenizer class you load from this checkpoint is not the same type as the class this" " function is called from. It may result in unexpected tokenization. \nThe tokenizer class you" f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called" f" from is '{cls.__name__}'." ) # Update with newly provided kwargs init_kwargs.update(kwargs) # Merge resolved_vocab_files arguments in init_kwargs. added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None) special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None) for args_name, file_path in resolved_vocab_files.items(): if args_name not in init_kwargs: init_kwargs[args_name] = file_path tokenizer_file = resolved_vocab_files.pop("tokenizer_file", None) if slow_tokenizer is not None: init_kwargs["__slow_tokenizer"] = slow_tokenizer init_kwargs["name_or_path"] = pretrained_model_name_or_path #### Handle tokenizer serialization of added and special tokens added_tokens_decoder: dict[int, AddedToken] = {} added_tokens_map: dict[str, AddedToken] = {} # if we have info on the slow added tokens if "added_tokens_decoder" in init_kwargs: for idx, token in init_kwargs["added_tokens_decoder"].items(): if isinstance(token, dict): token = AddedToken(**token) if isinstance(token, AddedToken): added_tokens_decoder[int(idx)] = token added_tokens_map[str(token)] = token else: raise TypeError( f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance" ) else: # begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified if special_tokens_map_file is not None: with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle: special_tokens_map = json.load(special_tokens_map_handle) for key, value in special_tokens_map.items(): if key in kwargs and kwargs[key]: # This value has already been redefined by the kwargs # We keep this new value and ignore the one stored in the special_tokens_map_file continue if isinstance(value, dict): value["special"] = True value = AddedToken(**value) elif key == "additional_special_tokens" and isinstance(value, list): additional_special_tokens = init_kwargs.pop("additional_special_tokens", []) or [] for token in value: if isinstance(token, 
dict): token["special"] = True token = AddedToken(**token) if token not in additional_special_tokens: additional_special_tokens.append(token) value = additional_special_tokens init_kwargs[key] = value # slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`. # this is for legacy purpose. We don't add the tokens after init for efficiency. if added_tokens_file is not None: special_tokens = [] for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): if init_kwargs[key] is not None: if key == "additional_special_tokens": special_tokens += [str(token) for token in init_kwargs[key]] else: special_tokens.append(str(init_kwargs[key])) with open(added_tokens_file, encoding="utf-8") as added_tokens_handle: added_tok_encoder = json.load(added_tokens_handle) for str_token, index in added_tok_encoder.items(): # if index not in added_tokens_decoder and str_token not in added_tokens_map: special = str_token in special_tokens added_tokens_decoder[index] = AddedToken( str_token, rstrip=False, lstrip=False, normalized=not special, special=special ) added_tokens_map[str(token)] = added_tokens_decoder[index] # allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer # if `tokenizer_config.json` is `None` if tokenizer_file is not None: # This is for slow so can be done before with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle: tokenizer_file_handle = json.load(tokenizer_file_handle) added_tokens = tokenizer_file_handle.pop("added_tokens") for serialized_tokens in added_tokens: idx = serialized_tokens.pop("id") added_tokens_decoder[idx] = AddedToken(**serialized_tokens) added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx] # end legacy # Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken # convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens init_kwargs["added_tokens_decoder"] = added_tokens_decoder init_kwargs = cls.convert_added_tokens(init_kwargs, save=False) for key in cls.SPECIAL_TOKENS_ATTRIBUTES & init_kwargs.keys(): if added_tokens_map != {} and init_kwargs[key] is not None: if key != "additional_special_tokens": init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key]) # Instantiate the tokenizer. try: tokenizer = cls(*init_inputs, **init_kwargs) except import_protobuf_decode_error(): logger.info( "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead." "(Google protobuf error: Tried to load SPM model with non-SPM vocab file).", ) return False except RuntimeError as e: if "sentencepiece_processor.cc" in str(e): logger.info( "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead." "(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).", ) return False except OSError: raise OSError( "Unable to load vocabulary from file. " "Please check that the provided vocabulary is accessible and not corrupted." ) if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size: logger.info( "Special tokens have been added in the vocabulary, make sure the associated word embeddings are" " fine-tuned or trained." 
) return tokenizer @staticmethod def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): # This method should be deleted in Transformers v5 # Its only purpose is to potentially throw a warning # that incorrectly defined max lengths of T5's tokenizer are used # which we will correct in Transformers v5. return max_model_length @classmethod def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True): if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken": obj.pop("__type") return AddedToken(**obj) if isinstance(obj, AddedToken) and save: obj = obj.__getstate__() if add_type_field: obj["__type"] = "AddedToken" else: # Don't save "special" for previous tokenizers obj.pop("special") return obj elif isinstance(obj, (list, tuple)): return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj] elif isinstance(obj, dict): return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()} return obj def save_chat_templates( self, save_directory: Union[str, os.PathLike], tokenizer_config: dict, filename_prefix: Optional[str], save_jinja_files: bool, ): """ Writes chat templates out to the save directory if we're using the new format, and removes them from the tokenizer config if present. If we're using the legacy format, it doesn't write any files, and instead writes the templates to the tokenizer config in the correct format. """ chat_template_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + CHAT_TEMPLATE_FILE ) chat_template_dir = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + CHAT_TEMPLATE_DIR ) saved_raw_chat_template_files = [] if save_jinja_files and isinstance(self.chat_template, str): # New format for single templates is to save them as chat_template.jinja with open(chat_template_file, "w", encoding="utf-8") as f: f.write(self.chat_template) logger.info(f"chat template saved in {chat_template_file}") saved_raw_chat_template_files.append(chat_template_file) if "chat_template" in tokenizer_config: tokenizer_config.pop("chat_template") # To ensure it doesn't somehow end up in the config too elif save_jinja_files and isinstance(self.chat_template, dict): # New format for multiple templates is to save the default as chat_template.jinja # and the other templates in the chat_templates/ directory for template_name, template in self.chat_template.items(): if template_name == "default": with open(chat_template_file, "w", encoding="utf-8") as f: f.write(self.chat_template["default"]) logger.info(f"chat template saved in {chat_template_file}") saved_raw_chat_template_files.append(chat_template_file) else: Path(chat_template_dir).mkdir(exist_ok=True) template_filepath = os.path.join(chat_template_dir, f"{template_name}.jinja") with open(template_filepath, "w", encoding="utf-8") as f: f.write(template) logger.info(f"chat template saved in {template_filepath}") saved_raw_chat_template_files.append(template_filepath) if "chat_template" in tokenizer_config: tokenizer_config.pop("chat_template") # To ensure it doesn't somehow end up in the config too elif isinstance(self.chat_template, dict): # Legacy format for multiple templates: # chat template dicts are saved to the config as lists of dicts with fixed key names. 
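            # e.g. {"default": "...", "tool_use": "..."} is serialized as
            # [{"name": "default", "template": "..."}, {"name": "tool_use", "template": "..."}]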
tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()] elif self.chat_template is not None: # Legacy format for single templates: Just make them a key in tokenizer_config.json tokenizer_config["chat_template"] = self.chat_template return tokenizer_config, saved_raw_chat_template_files def save_pretrained( self, save_directory: Union[str, os.PathLike], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, push_to_hub: bool = False, **kwargs, ) -> tuple[str]: """ Save the full tokenizer state. This method make sure the full tokenizer can then be re-loaded using the [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.. Warning,None This won't save modifications you may have applied to the tokenizer after the instantiation (for instance, modifying `tokenizer.do_lower_case` after creation). Args: save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved. legacy_format (`bool`, *optional*): Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate added_tokens files. If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be loaded in the corresponding "slow" tokenizer. If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exits, a value error is raised. filename_prefix (`str`, *optional*): A prefix to add to the names of the files saved by the tokenizer. push_to_hub (`bool`, *optional*, defaults to `False`): Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with `repo_id` (will default to the name of `save_directory` in your namespace). kwargs (`dict[str, Any]`, *optional*): Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. Returns: A tuple of `str`: The files saved. """ use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token") is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) kwargs["token"] = use_auth_token if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") return os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) special_tokens_map_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + SPECIAL_TOKENS_MAP_FILE ) tokenizer_config_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE ) tokenizer_config = copy.deepcopy(self.init_kwargs) # Let's save the init kwargs target_keys = set(self.init_kwargs.keys()) # Let's save the special tokens map (only the strings) target_keys.update(["model_max_length", "clean_up_tokenization_spaces"]) for k in target_keys: if hasattr(self, k): tokenizer_config[k] = getattr(self, k) # Let's make sure we properly save the special tokens tokenizer_config.update(self.special_tokens_map) if "extra_special_tokens" not in tokenizer_config: tokenizer_config["extra_special_tokens"] = self.extra_special_tokens tokenizer_config.update(self.extra_special_tokens) save_jinja_files = kwargs.get("save_jinja_files", True) tokenizer_config, saved_raw_chat_template_files = self.save_chat_templates( save_directory, tokenizer_config, filename_prefix, save_jinja_files ) if len(self.init_inputs) > 0: tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs) for file_id in self.vocab_files_names: tokenizer_config.pop(file_id, None) # no typefields, this way old fast and slow can load it tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True) # Process added tokens separately: allows previous versions to ignore it! added_tokens = {} for key, value in self.added_tokens_decoder.items(): added_tokens[key] = value.__getstate__() tokenizer_config["added_tokens_decoder"] = added_tokens # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained tokenizer_class = self.__class__.__name__ # Remove the Fast at the end if we can save the slow tokenizer if tokenizer_class.endswith("Fast") and getattr(self, "can_save_slow_tokenizer", False): tokenizer_class = tokenizer_class[:-4] tokenizer_config["tokenizer_class"] = tokenizer_class if getattr(self, "_auto_map", None) is not None: tokenizer_config["auto_map"] = self._auto_map if getattr(self, "_processor_class", None) is not None: tokenizer_config["processor_class"] = self._processor_class # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be # loaded from the Hub. if self._auto_class is not None: custom_object_save(self, save_directory, config=tokenizer_config) # remove private information if "name_or_path" in tokenizer_config: tokenizer_config.pop("name_or_path") tokenizer_config.pop("special_tokens_map_file", None) tokenizer_config.pop("tokenizer_file", None) if "device_map" in tokenizer_config: tokenizer_config.pop("device_map") with open(tokenizer_config_file, "w", encoding="utf-8") as f: out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n" f.write(out_str) logger.info(f"tokenizer config file saved in {tokenizer_config_file}") # Sanitize AddedTokens in special_tokens_map # kept for forward compatibility, will be removed in transoformers 5. 
Typefields are not saved for FC, special should not be save either write_dict = self.convert_added_tokens(self.special_tokens_map_extended, save=True, add_type_field=False) with open(special_tokens_map_file, "w", encoding="utf-8") as f: out_str = json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + "\n" f.write(out_str) logger.info(f"Special tokens file saved in {special_tokens_map_file}") file_names = (tokenizer_config_file, special_tokens_map_file, *saved_raw_chat_template_files) save_files = self._save_pretrained( save_directory=save_directory, file_names=file_names, legacy_format=legacy_format, filename_prefix=filename_prefix, ) if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) return save_files def _save_pretrained( self, save_directory: Union[str, os.PathLike], file_names: tuple[str], legacy_format: Optional[bool] = None, filename_prefix: Optional[str] = None, ) -> tuple[str]: """ Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens. Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`] """ if legacy_format is False: raise ValueError( "Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format." ) save_directory = str(save_directory) added_tokens_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE ) # the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size} if added_vocab: with open(added_tokens_file, "w", encoding="utf-8") as f: out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n" f.write(out_str) logger.info(f"added tokens file saved in {added_tokens_file}") vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix) return file_names + vocab_files + (added_tokens_file,) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: """ Save only the vocabulary of the tokenizer (vocabulary + added tokens). This method won't save the configuration and special token mappings of the tokenizer. Use [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer. Args: save_directory (`str`): The directory in which to save the vocabulary. filename_prefix (`str`, *optional*): An optional prefix to add to the named of the saved files. Returns: `Tuple(str)`: Paths to the files saved. """ raise NotImplementedError def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]: """ Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`. Args: text (`str`): The sequence to be encoded. pair (`str`, *optional*): A second sequence to be encoded with the first. add_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to add the special tokens associated with the corresponding model. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific encode method. See details in [`~PreTrainedTokenizerBase.__call__`] Returns: `list[str]`: The list of tokens. 
""" raise NotImplementedError @add_end_docstrings( ENCODE_KWARGS_DOCSTRING, """ **kwargs: Passed along to the `.tokenize()` method. """, """ Returns: `list[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text. """, ) def encode( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> list[int]: """ Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. Args: text (`str`, `list[str]` or `list[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). """ encoded_inputs = self.encode_plus( text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, padding_side=padding_side, return_tensors=return_tensors, **kwargs, ) return encoded_inputs["input_ids"] def num_special_tokens_to_add(self, pair: bool = False) -> int: raise NotImplementedError def _get_padding_truncation_strategies( self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs ): """ Find the correct padding/truncation strategy """ # Backward compatibility for previous behavior, maybe we should deprecate it: # If you only set max_length, it activates truncation for max_length if max_length is not None and padding is False and truncation is None: if verbose: if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False): logger.warning( "Truncation was not explicitly activated but `max_length` is provided a specific value, please" " use `truncation=True` to explicitly truncate examples to max length. Defaulting to" " 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the" " tokenizer you can select this strategy more precisely by providing a specific strategy to" " `truncation`." ) self.deprecation_warnings["Truncation-not-explicitly-activated"] = True truncation = "longest_first" # Get padding strategy if padding is not False: if padding is True: if verbose: if max_length is not None and ( truncation is None or truncation is False or truncation == "do_not_truncate" ): warnings.warn( "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. " "To pad to max length, use `padding='max_length'`." 
) padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(padding, PaddingStrategy): padding_strategy = PaddingStrategy(padding) elif isinstance(padding, PaddingStrategy): padding_strategy = padding else: padding_strategy = PaddingStrategy.DO_NOT_PAD # Get truncation strategy if truncation is not False and truncation is not None: if truncation is True: truncation_strategy = ( TruncationStrategy.LONGEST_FIRST ) # Default to truncate the longest sequences in pairs of inputs elif not isinstance(truncation, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation) elif isinstance(truncation, TruncationStrategy): truncation_strategy = truncation else: truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: if self.model_max_length > LARGE_INTEGER: if verbose: if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False): logger.warning( "Asking to pad to max_length but no maximum length is provided and the model has no" " predefined maximum length. Default to no padding." ) self.deprecation_warnings["Asking-to-pad-to-max_length"] = True padding_strategy = PaddingStrategy.DO_NOT_PAD else: max_length = self.model_max_length if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: if self.model_max_length > LARGE_INTEGER: if verbose: if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False): logger.warning( "Asking to truncate to max_length but no maximum length is provided and the model has" " no predefined maximum length. Default to no truncation." ) self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE else: max_length = self.model_max_length # Test if we have a padding token if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0): raise ValueError( "Asking to pad but the tokenizer does not have a padding token. " "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`." ) # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided if ( truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and padding_strategy != PaddingStrategy.DO_NOT_PAD and pad_to_multiple_of is not None and max_length is not None and (max_length % pad_to_multiple_of != 0) ): raise ValueError( "Truncation and padding are both activated but " f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})." 
) return padding_strategy, truncation_strategy, max_length, kwargs @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], None] = None, text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], None] = None, text_pair_target: Optional[ Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] ] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences. Args: text (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_target (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*): The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
""" # To avoid duplicating all_kwargs = { "add_special_tokens": add_special_tokens, "padding": padding, "truncation": truncation, "max_length": max_length, "stride": stride, "is_split_into_words": is_split_into_words, "pad_to_multiple_of": pad_to_multiple_of, "padding_side": padding_side, "return_tensors": return_tensors, "return_token_type_ids": return_token_type_ids, "return_attention_mask": return_attention_mask, "return_overflowing_tokens": return_overflowing_tokens, "return_special_tokens_mask": return_special_tokens_mask, "return_offsets_mapping": return_offsets_mapping, "return_length": return_length, "split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens), "verbose": verbose, } if return_tensors in ("tf", "jax"): logger.warning_once( "TensorFlow and JAX classes are deprecated and will be removed in Transformers v5. We " "recommend migrating to PyTorch classes or pinning your version of Transformers." ) all_kwargs.update(kwargs) if text is None and text_target is None: raise ValueError("You need to specify either `text` or `text_target`.") if text is not None: # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the # input mode in this case. if not self._in_target_context_manager: self._switch_to_input_mode() encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs) if text_target is not None: self._switch_to_target_mode() target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs) # Leave back tokenizer in input mode self._switch_to_input_mode() if text_target is None: return encodings elif text is None: return target_encodings else: encodings["labels"] = target_encodings["input_ids"] return encodings def _call_one( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if not _is_valid_text_input(text): raise ValueError( "text input must be of type `str` (single example), `list[str]` (batch or single pretokenized example) " "or `list[list[str]]` (batch of pretokenized examples)." 
) if text_pair is not None and not _is_valid_text_input(text_pair): raise ValueError( "text input must be of type `str` (single example), `list[str]` (batch or single pretokenized example) " "or `list[list[str]]` (batch of pretokenized examples)." ) if is_split_into_words: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) if is_batched: if isinstance(text_pair, str): raise TypeError( "when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as" " `text`." ) if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs, ) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. </Tip> Args: text (`str`, `list[str]` or (for non-fast tokenizers) `list[int]`): The first sequence to be encoded. 
This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `list[str]` or `list[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). """ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=kwargs.pop("split_special_tokens", self.split_special_tokens), **kwargs, ) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: raise NotImplementedError @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair], list[EncodedInput], list[EncodedInputPair], ], add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. <Tip warning={true}> This method is deprecated, `__call__` should be used instead. 
</Tip> Args: batch_text_or_text_pairs (`list[str]`, `list[tuple[str, str]]`, `list[list[str]]`, `list[tuple[list[str], list[str]]]`, and for not-fast tokenizers, also `list[list[int]]`, `list[tuple[list[int], list[int]]]`): Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus`). """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, split_special_tokens=split_special_tokens, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], list[PreTokenizedInputPair], list[EncodedInput], list[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, split_special_tokens: bool = False, **kwargs, ) -> BatchEncoding: raise NotImplementedError def pad( self, encoded_inputs: Union[ BatchEncoding, list[BatchEncoding], dict[str, EncodedInput], dict[str, list[EncodedInput]], list[dict[str, EncodedInput]], ], padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, ) -> BatchEncoding: """ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch. Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`, `self.pad_token_id` and `self.pad_token_type_id`). Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding. 
<Tip> If the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of PyTorch tensors, you will lose the specific device of your tensors however. </Tip> Args: encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]` or `list[dict[str, list[int]]]`): Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str, list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function. Instead of `list[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. """ if self.__class__.__name__.endswith("Fast"): if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False): logger.warning_advice( f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer," " using the `__call__` method is faster than using a method to encode the text followed by a call" " to the `pad` method to get a padded encoding." 
) self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0]} # The model's main input name, usually `input_ids`, has been passed for padding if self.model_input_names[0] not in encoded_inputs: raise ValueError( "You should supply an encoding or a list of encodings to this method " f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}" ) required_input = encoded_inputs[self.model_input_names[0]] if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0): if return_attention_mask: encoded_inputs["attention_mask"] = [] return encoded_inputs # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch first_element = required_input[0] if isinstance(first_element, (list, tuple)): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. for item in required_input: if len(item) != 0: first_element = item[0] break # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do. if not isinstance(first_element, (int, list, tuple)): if is_tf_tensor(first_element): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_tensor(first_element): return_tensors = "pt" if return_tensors is None else return_tensors elif isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( f"type of {first_element} unknown: {type(first_element)}. " "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( padding=padding, max_length=max_length, verbose=verbose ) required_input = encoded_inputs[self.model_input_names[0]] if required_input and not isinstance(required_input[0], (list, tuple)): encoded_inputs = self._pad( encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) return BatchEncoding(encoded_inputs, tensor_type=return_tensors) batch_size = len(required_input) assert all(len(v) == batch_size for v in encoded_inputs.values()), ( "Some items in the output dictionary have a different batch size than others." 
) if padding_strategy == PaddingStrategy.LONGEST: max_length = max(len(inputs) for inputs in required_input) padding_strategy = PaddingStrategy.MAX_LENGTH batch_outputs = {} for i in range(batch_size): inputs = {k: v[i] for k, v in encoded_inputs.items()} outputs = self._pad( inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) return BatchEncoding(batch_outputs, tensor_type=return_tensors) def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create the token type IDs corresponding to the sequences passed. [What are token type IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of building those. Args: token_ids_0 (`list[int]`): The first tokenized sequence. token_ids_1 (`list[int]`, *optional*): The second tokenized sequence. Returns: `list[int]`: The token type ids. """ cls_len = int(getattr(self, "cls_token_id", None) is not None) sep_len = int(getattr(self, "sep_token_id", None) is not None) if token_ids_1 is None: return [0] * (cls_len + len(token_ids_0) + sep_len) return [0] * (cls_len + len(token_ids_0) + sep_len) + [1] * (len(token_ids_1) + sep_len) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. This implementation does not add special tokens and this method should be overridden in a subclass. Args: token_ids_0 (`list[int]`): The first tokenized sequence. token_ids_1 (`list[int]`, *optional*): The second tokenized sequence. Returns: `list[int]`: The model input with special tokens. """ if token_ids_1 is None: return token_ids_0 return token_ids_0 + token_ids_1 @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def prepare_for_model( self, ids: list[int], pair_ids: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy, None] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, prepend_batch_axis: bool = False, **kwargs, ) -> BatchEncoding: """ Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and manages a moving window (with user defined stride) for overflowing tokens. Please Note, for *pair_ids* different than `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a combination of arguments will raise an error. Args: ids (`list[int]`): Tokenized input ids of the first sequence. 
Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_ids (`list[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. """ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) pair = pair_ids is not None len_ids = len(ids) len_pair_ids = len(pair_ids) if pair else 0 if return_token_type_ids and not add_special_tokens: raise ValueError( "Asking to return token_type_ids while setting add_special_tokens to False " "results in an undefined behavior. Please set add_special_tokens to True or " "set return_token_type_ids to None." ) if ( return_overflowing_tokens and truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is not None ): raise ValueError( "Not possible to return overflowing tokens for pair of sequences with the " "`longest_first`. Please select another truncation strategy than `longest_first`, " "for instance `only_second` or `only_first`." ) # Load from model defaults if return_token_type_ids is None: return_token_type_ids = "token_type_ids" in self.model_input_names if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names encoded_inputs = {} # Compute the total size of the returned encodings total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) # Truncation: Handle max sequence length overflowing_tokens = [] if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: ids, pair_ids, overflowing_tokens = self.truncate_sequences( ids, pair_ids=pair_ids, num_tokens_to_remove=total_len - max_length, truncation_strategy=truncation_strategy, stride=stride, ) if return_overflowing_tokens: encoded_inputs["overflowing_tokens"] = overflowing_tokens encoded_inputs["num_truncated_tokens"] = total_len - max_length # Add special tokens if add_special_tokens: sequence = self.build_inputs_with_special_tokens(ids, pair_ids) token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) else: sequence = ids + pair_ids if pair else ids token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) # Build output dictionary encoded_inputs["input_ids"] = sequence if return_token_type_ids: encoded_inputs["token_type_ids"] = token_type_ids if return_special_tokens_mask: if add_special_tokens: encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) else: encoded_inputs["special_tokens_mask"] = [0] * len(sequence) # Check lengths self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) # Padding if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: encoded_inputs = self.pad( encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask, ) if return_length: encoded_inputs["length"] = len(encoded_inputs["input_ids"]) batch_outputs = BatchEncoding( encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis ) return batch_outputs def truncate_sequences( self, ids: list[int], pair_ids: Optional[list[int]] = None, 
num_tokens_to_remove: int = 0, truncation_strategy: Union[str, TruncationStrategy] = "longest_first", stride: int = 0, ) -> tuple[list[int], list[int], list[int]]: """ Truncates a sequence pair in-place following the strategy. Args: ids (`list[int]`): Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. pair_ids (`list[int]`, *optional*): Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. num_tokens_to_remove (`int`, *optional*, defaults to 0): Number of tokens to remove using the truncation strategy. truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`): The strategy to follow for truncation. Can be: - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). stride (`int`, *optional*, defaults to 0): If set to a positive number, the overflowing tokens returned will contain some tokens from the main sequence returned. The value of this argument defines the number of additional tokens. Returns: `tuple[list[int], list[int], list[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair of sequences (or a batch of pairs) is provided. """ if num_tokens_to_remove <= 0: return ids, pair_ids, [] if not isinstance(truncation_strategy, TruncationStrategy): truncation_strategy = TruncationStrategy(truncation_strategy) overflowing_tokens = [] if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None ): if len(ids) > num_tokens_to_remove: window_len = min(len(ids), stride + num_tokens_to_remove) if self.truncation_side == "left": overflowing_tokens = ids[:window_len] ids = ids[num_tokens_to_remove:] elif self.truncation_side == "right": overflowing_tokens = ids[-window_len:] ids = ids[:-num_tokens_to_remove] else: raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.") else: error_msg = ( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the first sequence has a length {len(ids)}. " ) if truncation_strategy == TruncationStrategy.ONLY_FIRST: error_msg = ( error_msg + "Please select another truncation strategy than " f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." 
) logger.error(error_msg) elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: logger.warning( "Be aware, overflowing tokens are not returned for the setting you have chosen," f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " "truncation strategy. So the returned list will always be empty even if some " "tokens have been removed." ) len_pair_ids = len(pair_ids) if pair_ids is not None else 0 len_ids = len(ids) first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove) second_remove = num_tokens_to_remove - first_remove if len_ids > len_pair_ids: ids_to_move = first_remove + second_remove // 2 pair_ids_to_move = second_remove - second_remove // 2 else: ids_to_move = second_remove // 2 pair_ids_to_move = first_remove + second_remove - (second_remove // 2) if self.truncation_side == "right": ids = ids[:-ids_to_move] if ids_to_move > 0 else ids pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids elif self.truncation_side == "left": ids = ids[ids_to_move:] pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None else: raise ValueError(f"invalid truncation strategy:{self.truncation_side}") elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: if len(pair_ids) > num_tokens_to_remove: window_len = min(len(pair_ids), stride + num_tokens_to_remove) if self.truncation_side == "right": overflowing_tokens = pair_ids[-window_len:] pair_ids = pair_ids[:-num_tokens_to_remove] elif self.truncation_side == "left": overflowing_tokens = pair_ids[:window_len] pair_ids = pair_ids[num_tokens_to_remove:] else: raise ValueError(f"invalid truncation strategy:{self.truncation_side}") else: logger.error( f"We need to remove {num_tokens_to_remove} to truncate the input " f"but the second sequence has a length {len(pair_ids)}. " f"Please select another truncation strategy than {truncation_strategy}, " "for instance 'longest_first' or 'only_first'." ) return (ids, pair_ids, overflowing_tokens) def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in `padding_side` argument: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. 
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) padding_side = padding_side if padding_side is not None else self.padding_side if padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError(f"Invalid padding strategy:{padding_side}") return encoded_inputs def convert_tokens_to_string(self, tokens: list[str]) -> str: """ Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we often want to remove sub-word tokenization artifacts at the same time. Args: tokens (`list[str]`): The token to join in a string. Returns: `str`: The joined tokens. """ raise NotImplementedError def batch_decode( self, sequences: Union[list[int], list[list[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, **kwargs, ) -> list[str]: """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `list[str]`: The list of decoded sentences. 
""" return [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) for seq in sequences ] def decode( self, token_ids: Union[int, list[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces`. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. """ # Convert inputs to python lists token_ids = to_py_obj(token_ids) return self._decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) def _decode( self, token_ids: Union[int, list[int]], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, **kwargs, ) -> str: raise NotImplementedError def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`list[int]`): List of ids of the first sequence. token_ids_1 (`list[int]`, *optional*): List of ids of the second sequence. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ assert already_has_special_tokens and token_ids_1 is None, ( "You cannot use ``already_has_special_tokens=False`` with this tokenizer. " "Please use a slow (full python) tokenizer to activate this argument. " "Or set `return_special_tokens_mask=True` when calling the encoding method " "to get the special tokens mask in any tokenizer. " ) all_special_ids = self.all_special_ids # cache the property special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] return special_tokens_mask @staticmethod def clean_up_tokenization(out_string: str) -> str: """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. Args: out_string (`str`): The text to clean up. Returns: `str`: The cleaned-up string. 
""" out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string def _eventual_warn_about_too_long_sequence(self, ids: list[int], max_length: Optional[int], verbose: bool): """ Depending on the input and internal state we might trigger a warning about a sequence that is too long for its corresponding model Args: ids (`list[str]`): The ids produced by the tokenization max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set) verbose (`bool`): Whether or not to print more information and warnings. """ if max_length is None and len(ids) > self.model_max_length and verbose and self.model_max_length != 0: if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False): logger.warning( "Token indices sequence length is longer than the specified maximum sequence length " f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model " "will result in indexing errors" ) self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True def _switch_to_input_mode(self): """ Private method to put the tokenizer in input mode (when it has different modes for input/outputs) """ pass def _switch_to_target_mode(self): """ Private method to put the tokenizer in target mode (when it has different modes for input/outputs) """ pass @contextmanager def as_target_tokenizer(self): """ Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels. """ warnings.warn( "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your " "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as " "your input texts if you use the same keyword arguments, or in a separate call." ) self._switch_to_target_mode() self._in_target_context_manager = True yield self._in_target_context_manager = False self._switch_to_input_mode() @classmethod def register_for_auto_class(cls, auto_class="AutoTokenizer"): """ Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the library are already mapped with `AutoTokenizer`. Args: auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`): The auto class to register this new tokenizer with. """ if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class def prepare_seq2seq_batch( self, src_texts: list[str], tgt_texts: Optional[list[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: Optional[str] = None, truncation: bool = True, **kwargs, ) -> BatchEncoding: """ Prepare model inputs for translation. For best performance, translate one sentence at a time. Arguments: src_texts (`list[str]`): List of documents to summarize or source language texts. tgt_texts (`list`, *optional*): List of summaries or target language texts. 
max_length (`int`, *optional*): Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (`int`, *optional*): Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to `None`, this will use the max_length value. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to `self.__call__`. Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **labels** -- List of token ids for tgt_texts. The full set of keys `[input_ids, attention_mask, labels]`, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ # docstyle-ignore formatted_warning = """ `prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular `__call__` method to prepare your inputs and targets. Here is a short example: model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...) 
If you either need to use different keyword arguments for the source and target texts, you should do two calls like this: model_inputs = tokenizer(src_texts, ...) labels = tokenizer(text_target=tgt_texts, ...) model_inputs["labels"] = labels["input_ids"] See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice. For a more complete example, see the implementation of `prepare_seq2seq_batch`. """ warnings.warn(formatted_warning, FutureWarning) # mBART-specific kwargs that should be ignored by other models. kwargs.pop("src_lang", None) kwargs.pop("tgt_lang", None) if max_length is None: max_length = self.model_max_length model_inputs = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length with self.as_target_tokenizer(): labels = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) model_inputs["labels"] = labels["input_ids"] return model_inputs def get_fast_tokenizer_file(tokenization_files: list[str]) -> str: """ Get the tokenization file to use for this version of transformers. Args: tokenization_files (`list[str]`): The list of available configuration files. Returns: `str`: The tokenization file to use. """ tokenizer_files_map = {} for file_name in tokenization_files: search = _re_tokenizer_file.search(file_name) if search is not None: v = search.groups()[0] tokenizer_files_map[v] = file_name available_versions = sorted(tokenizer_files_map.keys()) # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions. tokenizer_file = FULL_TOKENIZER_FILE transformers_version = version.parse(__version__) for v in available_versions: if version.parse(v) <= transformers_version: tokenizer_file = tokenizer_files_map[v] else: # No point going further since the versions are sorted. break return tokenizer_file # To update the docstring, we need to copy the method, otherwise we change the original docstring. PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub) if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None: PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format( object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files" )
transformers/src/transformers/tokenization_utils_base.py/0
{ "file_path": "transformers/src/transformers/tokenization_utils_base.py", "repo_id": "transformers", "token_count": 91378 }
575
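The deprecation notices in `as_target_tokenizer` and `prepare_seq2seq_batch` above both point to the `text_target` argument of the regular `__call__`. A minimal sketch of that replacement pattern, assuming a T5-style checkpoint (the checkpoint name and example sentences are illustrative, not taken from the file above):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")  # illustrative checkpoint
src_texts = ["Studies have shown that owning a dog is good for you."]
tgt_texts = ["Studies show that owning a dog is good for you."]

# One call replaces prepare_seq2seq_batch / as_target_tokenizer: the targets are tokenized
# in target mode and returned under "labels".
model_inputs = tokenizer(
    src_texts,
    text_target=tgt_texts,
    max_length=64,
    padding="longest",
    truncation=True,
    return_tensors="pt",
)
# model_inputs now contains input_ids, attention_mask and labels.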
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073] OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
transformers/src/transformers/utils/constants.py/0
{ "file_path": "transformers/src/transformers/utils/constants.py", "repo_id": "transformers", "token_count": 162 }
576
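The constants above are typically consumed by the library's image processors during normalization; a minimal sketch of the arithmetic they feed (the torch tensor usage here is an illustration, the image processors themselves usually operate on numpy arrays):

import torch

from transformers.utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD

pixel_values = torch.rand(3, 224, 224)  # toy image in [0, 1], channels-first
mean = torch.tensor(IMAGENET_DEFAULT_MEAN).view(3, 1, 1)
std = torch.tensor(IMAGENET_DEFAULT_STD).view(3, 1, 1)
normalized = (pixel_values - mean) / std  # per-channel (x - mean) / std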
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class GraniteSpeechFeatureExtractor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"]) class GraniteSpeechProcessor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"]) class MusicgenMelodyFeatureExtractor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"]) class MusicgenMelodyProcessor(metaclass=DummyObject): _backends = ["torchaudio"] def __init__(self, *args, **kwargs): requires_backends(self, ["torchaudio"])
transformers/src/transformers/utils/dummy_torchaudio_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_torchaudio_objects.py", "repo_id": "transformers", "token_count": 318 }
577
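The dummy objects above stand in for torchaudio-backed classes when that backend is not installed; a small behavioural sketch of the pattern (the class name below is hypothetical and only mirrors the autogenerated file):

from transformers.utils import DummyObject, requires_backends


class ExampleTorchaudioProcessor(metaclass=DummyObject):  # hypothetical class, mirrors the pattern above
    _backends = ["torchaudio"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torchaudio"])


# With torchaudio missing, instantiation raises an ImportError that explains how to install the
# backend; with torchaudio installed, the real classes are imported and these dummies are unused.
processor = ExampleTorchaudioProcessor()  # may raise ImportError depending on the environment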
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with package versions """ import importlib.metadata import operator import re import sys from typing import Optional from packaging import version ops = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): if got_ver is None or want_ver is None: raise ValueError( f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" f" reinstalling {pkg}." ) if not ops[op](version.parse(got_ver), version.parse(want_ver)): raise ImportError( f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def require_version(requirement: str, hint: Optional[str] = None) -> None: """ Perform a runtime check of the dependency versions, using the exact same syntax used by pip. The installed module version comes from the *site-packages* dir via *importlib.metadata*. Args: requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" hint (`str`, *optional*): what suggestion to print in case of requirements not being met Example: ```python require_version("pandas>1.1.2") require_version("numpy>1.18.5", "this is important to have for whatever reason") ```""" hint = f"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$", requirement): pkg, op, want_ver = requirement, None, None else: match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f" got {requirement}" ) pkg, want_full = match[0] want_range = want_full.split(",") # there could be multiple requirements wanted = {} for w in want_range: match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f" but got {requirement}" ) op, want_ver = match[0] wanted[op] = want_ver if op not in ops: raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") # special case if pkg == "python": got_ver = ".".join([str(x) for x in sys.version_info[:3]]) for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) return # check if any version is installed try: got_ver = importlib.metadata.version(pkg) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) def require_version_core(requirement): """require_version wrapper which emits a core-specific hint on failure""" hint = "Try: `pip install transformers -U` or `pip install -e '.[dev]'` if you're working with git main" return require_version(requirement, hint)
transformers/src/transformers/utils/versions.py/0
{ "file_path": "transformers/src/transformers/utils/versions.py", "repo_id": "transformers", "token_count": 1705 }
578
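A short usage sketch of the version helpers above; the multi-constraint form exercises the comma-splitting branch of `require_version` (package names and bounds are illustrative):

from transformers.utils.versions import require_version, require_version_core

# Bare package name: only checks that the distribution is installed.
require_version("numpy")

# Single constraint with a custom hint shown on failure.
require_version("numpy>=1.18.5", "numpy is needed for this example")

# Comma-separated constraints are split and checked one by one.
require_version("tokenizers>=0.11.1,!=0.11.3,<2.0")

# Core wrapper attaches the transformers-specific reinstall hint.
require_version_core("tqdm>=4.27")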
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile from inspect import signature import pytest from parameterized import parameterized from transformers import set_seed from transformers.testing_utils import ( is_flaky, require_flash_attn, require_torch_gpu, slow, ) from .test_configuration_common import ConfigTester from .test_modeling_common import ( GenerationTesterMixin, ModelTesterMixin, ids_tensor, is_torch_available, require_torch, torch_device, ) from .test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch class CausalLMModelTester: _required_attributes = ("base_model_class", "config_class", "causal_lm_class") forced_config_args = [ "pad_token_id" ] # Arguments that should be passed to the config class even if not in its signature config_class = None base_model_class = None causal_lm_class = None sequence_classification_class = None token_classification_class = None question_answering_class = None def _verify_model_attributes(self): for required_attribute in self._required_attributes: if getattr(self, required_attribute) is None: raise ValueError( f"You have inherited from CausalLMModelTester but did not set the {required_attribute} attribute." ) @property def all_model_classes(self): return [ model_class for model_class in ( self.base_model_class, self.causal_lm_class, self.sequence_classification_class, self.token_classification_class, ) if model_class is not None ] def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, bos_token_id=1, eos_token_id=2, is_decoder=False, scope=None, expert_interval=1, moe_layer_start_index=0, moe_intermediate_size=12, shared_expert_intermediate_size=36, shared_expert_gate=True, moe_num_shared_experts=2, num_experts_per_tok=2, num_experts=8, mamba_n_groups=1, mamba_n_heads=16, mamba_d_state=16, mamba_d_conv=4, mamba_expand=2, mamba_chunk_size=16, ): self._verify_model_attributes() self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size 
self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.scope = scope self.head_dim = self.hidden_size // self.num_attention_heads self.is_decoder = is_decoder self.expert_interval = expert_interval self.moe_layer_start_index = moe_layer_start_index self.moe_intermediate_size = moe_intermediate_size self.shared_expert_intermediate_size = shared_expert_intermediate_size self.shared_expert_gate = shared_expert_gate self.moe_num_shared_experts = moe_num_shared_experts self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.mamba_n_groups = mamba_n_groups self.mamba_n_heads = mamba_n_heads self.mamba_d_state = mamba_d_state self.mamba_d_conv = mamba_d_conv self.mamba_expand = mamba_expand self.mamba_chunk_size = mamba_chunk_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @property def config_args(self): return list(signature(self.config_class.__init__).parameters.keys()) def get_config(self): kwargs = {} model_name_to_common_name = {v: k for k, v in self.config_class.attribute_map.items()} for k in self.config_args + self.forced_config_args: if hasattr(self, k) and k != "self": kwargs[k] = getattr(self, k) elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]): kwargs[k] = getattr(self, model_name_to_common_name[k]) return self.config_class(**kwargs) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = self.base_model_class(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CausalLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin): test_headmasking = False test_pruning = False model_tester_class = None all_model_classes = None rotary_embedding_layer = None # Enables RoPE tests if set pipeline_model_mapping = None def setUp(self): if self.model_tester_class is None: raise ValueError( "You have inherited from CausalLMModelTest but did not set the model_tester_class attribute." 
) self.model_tester = self.model_tester_class(self) self.config_tester = ConfigTester(self, config_class=self.model_tester.config_class) if self.all_model_classes is None: self.all_model_classes = self.model_tester.all_model_classes if self.pipeline_model_mapping is None: raise ValueError( "You have inherited from CausalLMModelTest but did not set the pipeline_model_mapping attribute." ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_sequence_classification_model(self): if self.model_tester.sequence_classification_class is None: self.skipTest("Model does not support sequence classification") config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = self.model_tester.sequence_classification_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_sequence_classification_model_for_single_label(self): if self.model_tester.sequence_classification_class is None: self.skipTest("Model does not support sequence classification") config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = self.model_tester.sequence_classification_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_sequence_classification_model_for_multi_label(self): if self.model_tester.sequence_classification_class is None: self.skipTest("Model does not support sequence classification") config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = self.model_tester.sequence_classification_class(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_token_classification_model(self): if self.model_tester.token_classification_class is None: self.skipTest("Model does not support token classification") config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) token_labels = ids_tensor([self.model_tester.batch_size, self.model_tester.seq_length], config.num_labels) model = self.model_tester.token_classification_class(config=config) model.to(torch_device) 
model.eval() result = model(input_ids, attention_mask=attention_mask, labels=token_labels) self.assertEqual( result.logits.shape, (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels), ) @parameterized.expand([("linear",), ("dynamic",), ("yarn",)]) def test_model_rope_scaling_from_config(self, scaling_type): if self.rotary_embedding_layer is None: self.skipTest("Rotary embedding layer not set") config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = self.model_tester_class.base_model_class(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = self.model_tester_class.base_model_class(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) def test_model_rope_scaling(self): if self.rotary_embedding_layer is None: self.skipTest("Rotary embedding layer not set") config, _ = self.model_tester.prepare_config_and_inputs_for_common() scaling_factor = 10 short_input_length = 10 long_input_length = int(config.max_position_embeddings * 1.5) # Inputs x = torch.randn( 1, dtype=torch.float32, device=torch_device ) # used exclusively to get the dtype and the device position_ids_short = torch.arange(short_input_length, dtype=torch.long, device=torch_device) position_ids_short = position_ids_short.unsqueeze(0) position_ids_long = torch.arange(long_input_length, dtype=torch.long, device=torch_device) position_ids_long = position_ids_long.unsqueeze(0) # Sanity check original RoPE original_rope = self.rotary_embedding_layer(config=config).to(torch_device) original_cos_short, original_sin_short = original_rope(x, position_ids_short) original_cos_long, original_sin_long = original_rope(x, position_ids_long) torch.testing.assert_close(original_cos_short, original_cos_long[:, :short_input_length, :]) torch.testing.assert_close(original_sin_short, original_sin_long[:, :short_input_length, :]) # Sanity check linear RoPE scaling # New position "x" should match original position with index "x/scaling_factor" config.rope_scaling = {"type": "linear", "factor": scaling_factor} linear_scaling_rope = self.rotary_embedding_layer(config=config).to(torch_device) linear_cos_short, linear_sin_short = linear_scaling_rope(x, position_ids_short) linear_cos_long, linear_sin_long = linear_scaling_rope(x, position_ids_long) torch.testing.assert_close(linear_cos_short, linear_cos_long[:, 
:short_input_length, :]) torch.testing.assert_close(linear_sin_short, linear_sin_long[:, :short_input_length, :]) for new_position in range(0, long_input_length, scaling_factor): original_position = int(new_position // scaling_factor) torch.testing.assert_close(linear_cos_long[:, new_position, :], original_cos_long[:, original_position, :]) torch.testing.assert_close(linear_sin_long[:, new_position, :], original_sin_long[:, original_position, :]) # Sanity check Dynamic NTK RoPE scaling # Scaling should only be observed after a long input is fed. We can observe that the frequencies increase # with scaling_factor (or that `inv_freq` decreases) config.rope_scaling = {"type": "dynamic", "factor": scaling_factor} ntk_scaling_rope = self.rotary_embedding_layer(config=config).to(torch_device) ntk_cos_short, ntk_sin_short = ntk_scaling_rope(x, position_ids_short) ntk_cos_long, ntk_sin_long = ntk_scaling_rope(x, position_ids_long) torch.testing.assert_close(ntk_cos_short, original_cos_short) torch.testing.assert_close(ntk_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(ntk_sin_long, original_sin_long) self.assertTrue((ntk_scaling_rope.inv_freq <= original_rope.inv_freq).all()) # Sanity check Yarn RoPE scaling # Scaling should be over the entire input config.rope_scaling = {"type": "yarn", "factor": scaling_factor} yarn_scaling_rope = self.rotary_embedding_layer(config=config).to(torch_device) yarn_cos_short, yarn_sin_short = yarn_scaling_rope(x, position_ids_short) yarn_cos_long, yarn_sin_long = yarn_scaling_rope(x, position_ids_long) torch.testing.assert_close(yarn_cos_short, yarn_cos_long[:, :short_input_length, :]) torch.testing.assert_close(yarn_sin_short, yarn_sin_long[:, :short_input_length, :]) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_short, original_cos_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_short, original_sin_short) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_cos_long, original_cos_long) with self.assertRaises(AssertionError): torch.testing.assert_close(yarn_sin_long, original_sin_long) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @is_flaky() @slow def test_flash_attn_2_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(reason="Model does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16, attn_implementation="eager") model.to(torch_device) dummy_input = inputs_dict[model_class.main_input_name] dummy_input = dummy_input.to(torch_device) outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] torch.testing.assert_close(logits_fa, logits, atol=3e-2, rtol=3e-2)
transformers/tests/causal_lm_tester.py/0
{ "file_path": "transformers/tests/causal_lm_tester.py", "repo_id": "transformers", "token_count": 8957 }
579
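A hypothetical sketch of how a model test file could wire up the tester above. The Llama classes are used purely as stand-ins for the required attributes, the import path depends on where the test lives, and the real Llama test may be organized differently:

import unittest

from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel
from transformers.testing_utils import require_torch

from tests.causal_lm_tester import CausalLMModelTest, CausalLMModelTester  # import path is an assumption


class LlamaModelTesterSketch(CausalLMModelTester):
    config_class = LlamaConfig
    base_model_class = LlamaModel
    causal_lm_class = LlamaForCausalLM


@require_torch
class LlamaModelTestSketch(CausalLMModelTest, unittest.TestCase):
    model_tester_class = LlamaModelTesterSketch
    # pipeline_model_mapping must be set, otherwise setUp() raises.
    pipeline_model_mapping = {
        "feature-extraction": LlamaModel,
        "text-generation": LlamaForCausalLM,
    }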
{ "feature_extractor_type": "Wav2Vec2FeatureExtractor", "processor_class": "Wav2Vec2Processor" }
transformers/tests/fixtures/preprocessor_config.json/0
{ "file_path": "transformers/tests/fixtures/preprocessor_config.json", "repo_id": "transformers", "token_count": 41 }
580
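A fixture like the one above is what the auto classes read when resolving a preprocessor; a sketch of loading it (the directory path is a placeholder):

from transformers import AutoFeatureExtractor

# AutoFeatureExtractor looks up "feature_extractor_type" in preprocessor_config.json and would
# resolve to Wav2Vec2FeatureExtractor here; AutoProcessor reads "processor_class" the same way.
feature_extractor = AutoFeatureExtractor.from_pretrained("path/to/dir/containing/the/fixture")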
# Copyright 2023 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from queue import Empty
from threading import Thread
from unittest.mock import patch

import pytest

from transformers import (
    AsyncTextIteratorStreamer,
    AutoTokenizer,
    TextIteratorStreamer,
    TextStreamer,
    is_torch_available,
)
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from transformers.utils.logging import _get_library_root_logger

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = 
cs.out[:-1] self.assertEqual(streamer_text, new_greedy_text) def test_text_streamer_decode_kwargs(self): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id root = _get_library_root_logger() with patch.object(root, "propagate", False): with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_special_tokens=True) model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token streamer_text = cs.out[:-1] # Remove the final "\n" streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1)) def test_iterator_streamer_timeout(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) streamer = TextIteratorStreamer(tokenizer, timeout=0.001) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(Empty): streamer_text = "" for new_text in streamer: streamer_text += new_text @require_torch @pytest.mark.asyncio(loop_scope="class") class AsyncStreamerTester(unittest.IsolatedAsyncioTestCase): async def test_async_iterator_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) streamer = AsyncTextIteratorStreamer(tokenizer) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() streamer_text = "" async for new_text in streamer: streamer_text += new_text self.assertEqual(streamer_text, greedy_text) async def test_async_iterator_streamer_timeout(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) streamer = AsyncTextIteratorStreamer(tokenizer, timeout=0.001) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) 
thread.start() # The streamer will timeout after 0.001 seconds, so TimeoutError will be raised with self.assertRaises(TimeoutError): streamer_text = "" async for new_text in streamer: streamer_text += new_text
transformers/tests/generation/test_streamers.py/0
{ "file_path": "transformers/tests/generation/test_streamers.py", "repo_id": "transformers", "token_count": 3297 }
581
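The iterator-streamer tests above all follow the same pattern: run `generate` in a background thread and consume decoded text from the streamer in the caller. A condensed sketch of that pattern outside a test (model name and prompt are illustrative):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = {**inputs, "max_new_tokens": 20, "do_sample": False, "streamer": streamer}

# generate() blocks, so it runs in a worker thread while the main thread drains the streamer.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()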
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Autoformer model.""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from transformers.utils import check_torch_load_is_safe from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class AutoformerModelTester: def __init__( self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ): self.d_model = d_model self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = context_length self.decoder_seq_length = prediction_length + label_length self.label_length = label_length self.moving_average = moving_average self.autocorrelation_factor = autocorrelation_factor def get_config(self): return AutoformerConfig( d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, scaling="std", # we need std to get non-zero `loc` ) def prepare_autoformer_inputs_dict(self, 
config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 # decoder inputs future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_autoformer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = AutoformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict) seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...]) enc_input = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, ) encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) mean = ( torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1) .unsqueeze(1) .repeat(1, config.prediction_length, 1) ) zeros = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, ) dec_input = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1), feature[:, config.context_length - config.label_length :, ...], ), dim=-1, ) trend_init = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1), feature[:, config.context_length - config.label_length :, ...], ), dim=-1, ) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {} test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = 
False def setUp(self): self.model_tester = AutoformerModelTester(self) self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False) # TODO: (ydshieh) Fix the wrong logic for `tmp_delay` is possible @unittest.skip( reason="The computation of `tmp_delay` in `AutoformerAttention.forward` seems wrong, see PR #12345. Also `topk` is used to compute indices which is not stable." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @unittest.skip(reason="Model has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass # # Input is 'static_categorical_features' not 'input_ids' def test_model_main_input_name(self): model_signature = inspect.signature(getattr(AutoformerModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask") expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, 
"encoder_seq_length", seq_len) d_model = getattr(self.model_tester, "d_model", None) num_attention_heads = getattr(self.model_tester, "num_attention_heads", None) dim = d_model // num_attention_heads for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], ) out_len = len(outputs) correct_outlen = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], ) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass def prepare_batch(filename="train-batch.pt"): file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset") check_torch_load_is_safe() batch = torch.load(file, map_location=torch_device, weights_only=True) return batch @require_torch @slow class AutoformerModelIntegrationTests(unittest.TestCase): def test_inference_no_head(self): model = 
AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device) batch = prepare_batch() with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0] expected_shape = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_inference_head(self): model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_seq_to_seq_generation(self): model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
transformers/tests/models/autoformer/test_modeling_autoformer.py/0
{ "file_path": "transformers/tests/models/autoformer/test_modeling_autoformer.py", "repo_id": "transformers", "token_count": 9049 }
582
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Bros model.""" import copy import unittest from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BrosConfig, BrosForTokenClassification, BrosModel, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification, ) class BrosModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_bbox_first_token_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_bbox_first_token_mask = use_bbox_first_token_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 8], 1) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) bbox_first_token_mask = None if self.use_bbox_first_token_mask: bbox_first_token_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.bool).to(torch_device) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], 
self.num_labels) initial_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) subsequent_token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return ( config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ) def get_config(self): return BrosConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): model = BrosModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_spade_ee_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosSpadeEEForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, bbox_first_token_mask=bbox_first_token_mask, token_type_ids=token_type_ids, initial_token_labels=token_labels, subsequent_token_labels=token_labels, ) self.parent.assertEqual(result.initial_token_logits.shape, (self.batch_size, self.seq_length, self.num_labels)) self.parent.assertEqual( result.subsequent_token_logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1) ) def create_and_check_for_spade_el_token_classification( self, config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, initial_token_labels, subsequent_token_labels, ): config.num_labels = self.num_labels model = BrosSpadeELForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, attention_mask=input_mask, bbox_first_token_mask=bbox_first_token_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.seq_length + 1)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, token_type_ids, input_mask, bbox_first_token_mask, token_labels, 
initial_token_labels, subsequent_token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class BrosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( BrosForTokenClassification, BrosSpadeEEForTokenClassification, BrosSpadeELForTokenClassification, BrosModel, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"feature-extraction": BrosModel, "token-classification": BrosForTokenClassification} if is_torch_available() else {} ) # BROS requires `bbox` in the inputs which doesn't fit into the above 2 pipelines' input formats. # see https://github.com/huggingface/transformers/pull/26294 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True def setUp(self): self.model_tester = BrosModelTester(self) self.config_tester = ConfigTester(self, config_class=BrosConfig, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if return_labels: if model_class.__name__ in ["BrosForTokenClassification", "BrosSpadeELForTokenClassification"]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["bbox_first_token_mask"] = torch.ones( [self.model_tester.batch_size, self.model_tester.seq_length], dtype=torch.bool, device=torch_device, ) elif model_class.__name__ in ["BrosSpadeEEForTokenClassification"]: inputs_dict["initial_token_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["subsequent_token_labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["bbox_first_token_mask"] = torch.ones( [self.model_tester.batch_size, self.model_tester.seq_length], dtype=torch.bool, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_spade_ee_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_spade_ee_token_classification(*config_and_inputs) def test_for_spade_el_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_spade_el_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = 
"jinho8345/bros-base-uncased" model = BrosModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_bros_batch_inputs(): attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) bbox = torch.tensor( [ [ [0.0000, 0.0000, 0.0000, 0.0000], [0.5223, 0.5590, 0.5787, 0.5720], [0.5853, 0.5590, 0.6864, 0.5720], [0.5853, 0.5590, 0.6864, 0.5720], [0.1234, 0.5700, 0.2192, 0.5840], [0.2231, 0.5680, 0.2782, 0.5780], [0.2874, 0.5670, 0.3333, 0.5780], [0.3425, 0.5640, 0.4344, 0.5750], [0.0866, 0.7770, 0.1181, 0.7870], [0.1168, 0.7770, 0.1522, 0.7850], [0.1535, 0.7750, 0.1864, 0.7850], [0.1890, 0.7750, 0.2572, 0.7850], [1.0000, 1.0000, 1.0000, 1.0000], ], [ [0.0000, 0.0000, 0.0000, 0.0000], [0.4396, 0.6720, 0.4659, 0.6850], [0.4698, 0.6720, 0.4843, 0.6850], [0.1575, 0.6870, 0.2021, 0.6980], [0.2047, 0.6870, 0.2730, 0.7000], [0.1299, 0.7010, 0.1430, 0.7140], [0.1299, 0.7010, 0.1430, 0.7140], [0.1562, 0.7010, 0.2441, 0.7120], [0.1562, 0.7010, 0.2441, 0.7120], [0.2454, 0.7010, 0.3150, 0.7120], [0.3176, 0.7010, 0.3320, 0.7110], [0.3333, 0.7000, 0.4029, 0.7140], [1.0000, 1.0000, 1.0000, 1.0000], ], ] ) input_ids = torch.tensor( [ [101, 1055, 8910, 1012, 5719, 3296, 5366, 3378, 2146, 2846, 10807, 13494, 102], [101, 2112, 1997, 3671, 6364, 1019, 1012, 5057, 1011, 4646, 2030, 2974, 102], ] ) return input_ids, bbox, attention_mask @require_torch class BrosModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = BrosModel.from_pretrained("jinho8345/bros-base-uncased").to(torch_device) input_ids, bbox, attention_mask = prepare_bros_batch_inputs() with torch.no_grad(): outputs = model( input_ids.to(torch_device), bbox.to(torch_device), attention_mask=attention_mask.to(torch_device), return_dict=True, ) # verify the logits expected_shape = torch.Size((2, 13, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.3074, 0.1363, 0.3143], [0.0925, -0.1155, 0.1050], [0.0221, 0.0003, 0.1285]] ).to(torch_device) torch.set_printoptions(sci_mode=False) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class ChineseCLIPProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = ChineseCLIPProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "的", "价", "格", "是", "15", "便", "alex", "##andra", ",", "。", "-", "t", "shirt", ] cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": {"height": 224, "width": 224}, "do_center_crop": True, "crop_size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], "do_convert_rgb": True, } cls.image_processor_file = os.path.join(cls.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(cls.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) tokenizer = cls.get_tokenizer() image_processor = cls.get_image_processor() processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) processor.save_pretrained(cls.tmpdirname) @classmethod def get_tokenizer(cls, **kwargs): return BertTokenizer.from_pretrained(cls.tmpdirname, **kwargs) @classmethod def get_rust_tokenizer(cls, **kwargs): return BertTokenizerFast.from_pretrained(cls.tmpdirname, **kwargs) @classmethod def get_image_processor(cls, **kwargs): return ChineseCLIPImageProcessor.from_pretrained(cls.tmpdirname, **kwargs) @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() with tempfile.TemporaryDirectory() as tmpdir: processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(tmpdir) processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(tmpdir) processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) 
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor) self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor) def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = ChineseCLIPProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False) processor = ChineseCLIPProcessor.from_pretrained( tmpdir, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "Alexandra,T-shirt的价格是15便士。" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "Alexandra,T-shirt的价格是15便士。" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
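# Hedged usage sketch (not part of the test class above): the tests build a
# ChineseCLIPProcessor from a toy vocabulary; against a published checkpoint the
# same joint text+image call looks roughly like this. The checkpoint name
# "OFA-Sys/chinese-clip-vit-base-patch16" and the blank test image are assumptions.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    sketch_processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    sketch_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    sketch_inputs = sketch_processor(
        text="Alexandra,T-shirt的价格是15便士。", images=sketch_image, return_tensors="pt"
    )
    # As asserted in `test_processor` above, both modalities end up in one batch:
    # attention_mask, input_ids, pixel_values, token_type_ids.
    print(sorted(sketch_inputs.keys()))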
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import shutil import tempfile import unittest from transformers import ClvpFeatureExtractor, ClvpProcessor, ClvpTokenizer from transformers.testing_utils import require_torch from .test_feature_extraction_clvp import floats_list @require_torch class ClvpProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "susnato/clvp_dev" self.tmpdirname = tempfile.mkdtemp() def tearDown(self): super().tearDown() shutil.rmtree(self.tmpdirname) gc.collect() # Copied from transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.get_tokenizer with Whisper->Clvp def get_tokenizer(self, **kwargs): return ClvpTokenizer.from_pretrained(self.checkpoint, **kwargs) # Copied from transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.get_feature_extractor with Whisper->Clvp def get_feature_extractor(self, **kwargs): return ClvpFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) # Copied from transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.test_save_load_pretrained_default with Whisper->Clvp def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = ClvpProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, ClvpTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor) # Copied from transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.test_feature_extractor with Whisper->Clvp,processor(raw_speech->processor(raw_speech=raw_speech def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech=raw_speech, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) # Copied from transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer with Whisper->Clvp def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) # Copied from 
transformers.tests.models.whisper.test_processing_whisper.WhisperProcessorTest.test_tokenizer_decode with Whisper->Clvp def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = ClvpProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_save_load_pretrained_additional_features(self): processor = ClvpProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(pad_token="(PAD)") feature_extractor_add_kwargs = self.get_feature_extractor(sampling_rate=16000) processor = ClvpProcessor.from_pretrained( self.tmpdirname, pad_token="(PAD)", sampling_rate=16000, ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, ClvpTokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, ClvpFeatureExtractor)
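# Hedged usage sketch (not part of the test class above): ClvpProcessor bundles a
# ClvpFeatureExtractor and a ClvpTokenizer, so one object handles both raw speech
# and text, exactly as exercised in the tests. The checkpoint name comes from
# `setUp`; the tiny random waveform reuses the `floats_list` helper imported above
# and its length is an arbitrary illustrative choice.
if __name__ == "__main__":
    sketch_processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
    sketch_speech = floats_list((1, 800))
    sketch_audio_inputs = sketch_processor(raw_speech=sketch_speech, return_tensors="np")
    sketch_text_inputs = sketch_processor(text="This is a test string", return_tensors="np")
    # Audio goes through the feature extractor, text through the tokenizer.
    print(list(sketch_audio_inputs.keys()), list(sketch_text_inputs.keys()))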
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DAB-DETR model.""" import inspect import math import unittest from transformers import DabDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_timm, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( DabDetrForObjectDetection, DabDetrModel, ) if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class DabDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, min_size=200, max_size=200, n_targets=8, num_labels=91, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.n_targets = n_targets self.num_labels = num_labels # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) labels.append(target) config = self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return 
DabDetrConfig( hidden_size=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, use_timm_backbone=False, backbone_config=resnet_config, backbone=None, use_pretrained_backbone=False, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_dab_detr_model(self, config, pixel_values, pixel_mask, labels): model = DabDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) ) def create_and_check_dab_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DabDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DabDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DabDetrModel, DabDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( { "image-feature-extraction": DabDetrModel, "object-detection": DabDetrForObjectDetection, } if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False zero_init_hidden_state = True test_torch_exportable = True # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ in ["DabDetrForObjectDetection"]: labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( self.model_tester.n_targets, self.model_tester.min_size, self.model_tester.max_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DabDetrModelTester(self) self.config_tester = ConfigTester(self, config_class=DabDetrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_dab_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_dab_detr_model(*config_and_inputs) def test_dab_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dab_detr_object_detection_head_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="DETR does not use inputs_embeds") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="DETR does not have a get_input_embeddings method") def test_model_common_attributes(self): pass @unittest.skip(reason="DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @slow def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): print(t) t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: torch.testing.assert_close( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5, rtol=1e-5, msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( [hidden_states[0].shape[1], hidden_states[0].shape[2]], [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( [hidden_states[0].shape[1], hidden_states[0].shape[2]], [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states 
= True check_hidden_states_output(inputs_dict, config, model_class) # Had to modify the threshold to 2 decimals instead of 3 because sometimes it threw an error def test_batching_equivalence(self): """ Tests that the model supports batching and that the output is the nearly the same for the same input in different batch sizes. (Why "nearly the same" not "exactly the same"? Batching uses different matmul shapes, which often leads to different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535) """ def get_tensor_equivalence_function(batched_input): # models operating on continuous spaces have higher abs difference than LMs # instead, we can rely on cos distance for image/speech models, similar to `diffusers` if "input_ids" not in batched_input: return lambda tensor1, tensor2: ( 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=1e-38) ) return lambda tensor1, tensor2: torch.max(torch.abs(tensor1 - tensor2)) def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif isinstance(batched_object, dict): for batched_object_value, single_row_object_value in zip( batched_object.values(), single_row_object.values() ): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return else: # indexing the first element does not always work # e.g. models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-02, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() equivalence = get_tensor_equivalence_function(batched_input) for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"): config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: # e.g. musicgen has inputs of size (bs*codebooks). 
in most cases value.shape[0] == batch_size single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] else: single_row_input[key] = value with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) if isinstance(model_batched_output, torch.Tensor): model_batched_output = {"model_output": model_batched_output} model_row_output = {"model_output": model_row_output} for key in model_batched_output: # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan` if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key: model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True decoder_seq_length = self.model_tester.decoder_seq_length encoder_seq_length = self.model_tester.encoder_seq_length decoder_key_length = self.model_tester.decoder_seq_length encoder_key_length = self.model_tester.encoder_seq_length for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] del inputs_dict["output_hidden_states"] config.output_attentions = True config.output_hidden_states = False model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 6 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model 
= model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: # decoder_hidden_states, encoder_last_hidden_state, encoder_hidden_states added_hidden_states = 3 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs, output_attentions=True, output_hidden_states=True) # logits output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() encoder_attentions = outputs.encoder_attentions[0] encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.auxiliary_loss = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_outputs) self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") # We only have loss with ObjectDetection model_class = self.all_model_classes[-1] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names 
else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.backbone_config = None config.use_timm_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DabDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.init_xavier_std = 1e9 # Copied from RT-DETR configs_no_init.initializer_bias_prior_prob = 0.2 bias_value = -1.3863 # log_e ((1 - 0.2) / 0.2) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "bbox_attention" in name and "bias" not in name: self.assertLess( 100000, abs(param.data.max().item()), msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # Modified from RT-DETR elif "class_embed" in name and "bias" in name: bias_tensor = torch.full_like(param.data, bias_value) torch.testing.assert_close( param.data, bias_tensor, atol=1e-4, rtol=1e-4, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "activation_fn" in name and config.activation_function == "prelu": self.assertTrue( param.data.mean() == 0.25, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "backbone.conv_encoder.model" in name: continue elif "self_attn.in_proj_weight" in name: self.assertIn( ((param.data.mean() * 1e2).round() / 1e2).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) TOLERANCE = 1e-4 CHECKPOINT = "IDEA-Research/dab-detr-resnet-50" # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DabDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return ConditionalDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None def test_inference_no_head(self): model = DabDetrModel.from_pretrained(CHECKPOINT).to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = 
model(pixel_values=encoding.pixel_values) expected_shape = torch.Size((1, 300, 256)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [ [-0.4879, -0.2594, 0.4524], [-0.4997, -0.4258, 0.4329], [-0.8220, -0.4996, 0.0577], ] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=2e-4, rtol=2e-4) def test_inference_object_detection_head(self): model = DabDetrForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values) # verify logits + box predictions expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slice_logits = torch.tensor( [ [-10.1764, -5.5247, -8.9324], [-9.8137, -5.6730, -7.5163], [-10.3056, -5.6075, -8.5935], ] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, atol=3e-4, rtol=3e-4) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [ [0.3708, 0.3000, 0.2754], [0.5211, 0.6126, 0.9494], [0.2897, 0.6731, 0.5460], ] ).to(torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=3e-4, rtol=3e-4) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.8732, 0.8563, 0.8554, 0.6080, 0.5895]).to(torch_device) expected_labels = [17, 75, 17, 75, 63] expected_boxes = torch.tensor([14.6931, 49.3886, 320.5176, 469.2762]).to(torch_device) self.assertEqual(len(results["scores"]), 5) torch.testing.assert_close(results["scores"], expected_scores, atol=3e-4, rtol=3e-4) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) torch.testing.assert_close(results["boxes"][0, :], expected_boxes, atol=3e-4, rtol=3e-4)
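# Hedged usage sketch (not part of the test classes above): it replays the
# object-detection path from `DabDetrModelIntegrationTests` as a plain script,
# reusing the CHECKPOINT constant and the `prepare_img` helper defined above.
# Running it downloads the weights and assumes the local COCO fixture image is
# reachable from the current working directory.
if __name__ == "__main__":
    sketch_processor = ConditionalDetrImageProcessor.from_pretrained(CHECKPOINT)
    sketch_model = DabDetrForObjectDetection.from_pretrained(CHECKPOINT)
    sketch_model.eval()
    sketch_image = prepare_img()
    sketch_inputs = sketch_processor(images=sketch_image, return_tensors="pt")
    with torch.no_grad():
        sketch_outputs = sketch_model(**sketch_inputs)
    sketch_results = sketch_processor.post_process_object_detection(
        sketch_outputs, threshold=0.3, target_sizes=[sketch_image.size[::-1]]
    )[0]
    # Mirrors the postprocessing assertions above: label/score pairs above the threshold.
    for sketch_score, sketch_label in zip(sketch_results["scores"], sketch_results["labels"]):
        print(f"{sketch_model.config.id2label[sketch_label.item()]}: {sketch_score.item():.3f}")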
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Deformable DETR model.""" import inspect import math import unittest from transformers import DeformableDetrConfig, ResNetConfig, is_torch_available, is_vision_available from transformers.file_utils import cached_property from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_torch_bf16, require_vision, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DeformableDetrForObjectDetection, DeformableDetrModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DeformableDetrModelTester: def __init__( self, parent, batch_size=8, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=8, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_queries=12, num_channels=3, image_size=196, n_targets=8, num_labels=91, num_feature_levels=4, encoder_n_points=2, decoder_n_points=6, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_queries = num_queries self.num_channels = num_channels self.image_size = image_size self.n_targets = n_targets self.num_labels = num_labels self.num_feature_levels = num_feature_levels self.encoder_n_points = encoder_n_points self.decoder_n_points = decoder_n_points # we also set the expected seq length for both encoder and decoder self.encoder_seq_length = ( math.ceil(self.image_size / 8) ** 2 + math.ceil(self.image_size / 16) ** 2 + math.ceil(self.image_size / 32) ** 2 + math.ceil(self.image_size / 64) ** 2 ) self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) labels.append(target) config = 
self.get_config() return config, pixel_values, pixel_mask, labels def get_config(self): resnet_config = ResNetConfig( num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], hidden_act="relu", num_labels=3, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return DeformableDetrConfig( d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, num_queries=self.num_queries, num_labels=self.num_labels, num_feature_levels=self.num_feature_levels, encoder_n_points=self.encoder_n_points, decoder_n_points=self.decoder_n_points, use_timm_backbone=False, backbone=None, backbone_config=resnet_config, use_pretrained_backbone=False, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = DeformableDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class DeformableDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False test_torch_exportable = True # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "DeformableDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) target["masks"] = torch.ones( 
self.model_tester.n_targets, self.model_tester.image_size, self.model_tester.image_size, device=torch_device, dtype=torch.float, ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = DeformableDetrModelTester(self) self.config_tester = ConfigTester( self, config_class=DeformableDetrConfig, has_text_modality=False, common_properties=["num_channels", "d_model", "encoder_attention_heads", "decoder_attention_heads"], ) def test_config(self): self.config_tester.run_common_tests() def test_deformable_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) def test_deformable_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Deformable DETR does not use inputs_embeds") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Deformable DETR is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="Deformable DETR does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) out_len = len(outputs) correct_outlen = 8 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "DeformableDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], ) # cross attentions 
cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.encoder_n_points, ], ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def test_retain_grad_hidden_states_attentions(self): # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the second output since last_hidden_state is the second item output = outputs[1] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_auxiliary_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.auxiliary_loss = True # only test for object detection and segmentation model for model_class in self.all_model_classes[1:]: model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) outputs = model(**inputs) self.assertIsNotNone(outputs.auxiliary_outputs) self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1) def 
test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = ["pixel_values", "pixel_mask"] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.backbone_config = None config.use_timm_backbone = True config.backbone_kwargs = {"out_indices": [1, 2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DeformableDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 4) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 4) self.assertTrue(outputs) def test_hf_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Load a pretrained HF checkpoint as backbone config.backbone = "microsoft/resnet-18" config.backbone_config = None config.use_timm_backbone = False config.use_pretrained_backbone = True config.backbone_kwargs = {"out_indices": [1, 2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "DeformableDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 4) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 4) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: print("Model class:", model_class) model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def 
test_two_stage_training(self): model_class = DeformableDetrForObjectDetection config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True config.two_stage = True config.auxiliary_loss = True config.with_box_refine = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def create_and_check_model_fp16_forward(self): model_class = DeformableDetrForObjectDetection config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) model.to(torch_device) model.half() model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) output = model(**inputs)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) @require_torch_bf16 def create_and_check_model_bf16_forward(self): model_class = DeformableDetrForObjectDetection config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config, dtype=torch.bfloat16) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) output = model(**inputs)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_timm @require_vision @slow class DeformableDetrModelIntegrationTests(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None def test_inference_object_detection_head(self): model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [ [-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116], ] ).to(torch_device) expected_boxes = torch.tensor( [ [0.8693, 0.2290, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518], ] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-4, atol=2e-4) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-4) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7999, 0.7895, 0.6332, 0.4719, 0.4382]).to(torch_device) expected_labels = [17, 17, 75, 75, 63] expected_slice_boxes = torch.tensor([16.5028, 52.8391, 318.2544, 470.7841]).to(torch_device) self.assertEqual(len(results["scores"]), 5) torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-4, atol=2e-4) self.assertSequenceEqual(results["labels"].tolist(), 
expected_labels) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=2e-4, atol=2e-4) def test_inference_object_detection_head_with_box_refine_two_stage(self): model = DeformableDetrForObjectDetection.from_pretrained( "SenseTime/deformable-detr-with-box-refine-two-stage" ).to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_logits = torch.tensor( [ [-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298], ] ).to(torch_device) expected_boxes = torch.tensor( [ [0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564], ] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-4, atol=2e-4) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-4) @require_torch_accelerator def test_inference_object_detection_head_equivalence_cpu_accelerator(self): image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt") pixel_values = encoding["pixel_values"] pixel_mask = encoding["pixel_mask"] # 1. run model on CPU model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") with torch.no_grad(): cpu_outputs = model(pixel_values, pixel_mask) # 2. run model on accelerator model.to(torch_device) with torch.no_grad(): gpu_outputs = model(pixel_values.to(torch_device), pixel_mask.to(torch_device)) # 3. assert equivalence # (on A10, the differences get larger than on T4) for key in cpu_outputs: torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), atol=2e-2, rtol=2e-2) expected_logits = torch.tensor( [ [-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003], ] ) assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=2e-4)
transformers/tests/models/deformable_detr/test_modeling_deformable_detr.py/0
{ "file_path": "transformers/tests/models/deformable_detr/test_modeling_deformable_detr.py", "repo_id": "transformers", "token_count": 15792 }
587
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert import test_tokenization_bert


@require_tokenizers
class DistilBertTokenizationTest(test_tokenization_bert.BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_id = "distilbert/distilbert-base-uncased"

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
transformers/tests/models/distilbert/test_tokenization_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_tokenization_distilbert.py", "repo_id": "transformers", "token_count": 594 }
588
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch DPT model.""" import unittest import pytest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTModelTester: def __init__( self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, neck_hidden_sizes=[16, 32], is_hybrid=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.backbone_out_indices = backbone_out_indices self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.is_hybrid = is_hybrid self.neck_hidden_sizes = neck_hidden_sizes # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, fusion_hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, neck_hidden_sizes=self.neck_hidden_sizes, ) def create_and_check_model(self, config, pixel_values, labels): model = DPTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = DPTForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () pipeline_model_mapping = ( { "depth-estimation": DPTForDepthEstimation, "image-feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = DPTModelTester(self) self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DPT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) def test_training(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue model 
= model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values() or not model_class.supports_gradient_checkpointing: continue model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Inductor error for dynamic shape") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone backbone_params = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() if model.__class__.__name__ == "DPTForDepthEstimation": # Confirm out_indices propagated to backbone self.assertEqual(len(model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_pretrained_backbone = True config.backbone_config = None config.backbone_kwargs = {"out_indices": [-2, -1]} # Force load_backbone path config.is_hybrid = False # Load a timm backbone config.backbone = "resnet18" config.use_timm_backbone = True _validate_backbone_init() # Load a HF backbone config.backbone = "facebook/dinov2-small" config.use_timm_backbone = False _validate_backbone_init() @slow def test_model_from_pretrained(self): model_name = "Intel/dpt-large" model = DPTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class DPTModelIntegrationTest(unittest.TestCase): def test_inference_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large").to(torch_device) image = 
prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # verify the predicted depth expected_shape = torch.Size((1, 384, 384)) self.assertEqual(predicted_depth.shape, expected_shape) expectations = Expectations( { (None, None): [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]], ("cuda", 8): [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4) def test_inference_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 150, 480, 480)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ).to(torch_device) torch.testing.assert_close(outputs.logits[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_post_processing_semantic_segmentation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) outputs.logits = outputs.logits.detach().cpu() segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((480, 480)) self.assertEqual(segmentation[0].shape, expected_shape) def test_post_processing_depth_estimation(self): image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large") model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") # forward pass with torch.no_grad(): outputs = model(**inputs) predicted_depth = image_processor.post_process_depth_estimation(outputs=outputs)[0]["predicted_depth"] expected_shape = torch.Size((384, 384)) self.assertTrue(predicted_depth.shape == expected_shape) predicted_depth_l = image_processor.post_process_depth_estimation(outputs=outputs, target_sizes=[(500, 500)]) predicted_depth_l = predicted_depth_l[0]["predicted_depth"] expected_shape = torch.Size((500, 500)) self.assertTrue(predicted_depth_l.shape == expected_shape) output_enlarged = torch.nn.functional.interpolate( predicted_depth.unsqueeze(0).unsqueeze(1), size=(500, 500), mode="bicubic", align_corners=False ).squeeze() self.assertTrue(output_enlarged.shape == expected_shape) torch.testing.assert_close(predicted_depth_l, output_enlarged, atol=1e-3, rtol=1e-3) @pytest.mark.torch_export_test def test_export(self): for strict in [True, False]: with self.subTest(strict=strict): if not is_torch_greater_or_equal_than_2_4: self.skipTest(reason="This test 
requires torch >= 2.4 to run.") model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device).eval() image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) exported_program = torch.export.export( model, args=(inputs["pixel_values"],), strict=strict, ) with torch.no_grad(): eager_outputs = model(**inputs) exported_outputs = exported_program.module().forward(inputs["pixel_values"]) self.assertEqual(eager_outputs.logits.shape, exported_outputs.logits.shape) torch.testing.assert_close(eager_outputs.logits, exported_outputs.logits, rtol=1e-4, atol=1e-4)
transformers/tests/models/dpt/test_modeling_dpt.py/0
{ "file_path": "transformers/tests/models/dpt/test_modeling_dpt.py", "repo_id": "transformers", "token_count": 7943 }
589
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest
from functools import lru_cache

from transformers.models.esm.tokenization_esm import VOCAB_FILES_NAMES, EsmTokenizer
from transformers.testing_utils import require_tokenizers
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

from ...test_tokenization_common import use_cache_if_possible


@require_tokenizers
class ESMTokenizationTest(unittest.TestCase):
    tokenizer_class = EsmTokenizer

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.tmpdirname = tempfile.mkdtemp()
        vocab_tokens: list[str] = ["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>"]  # fmt: skip
        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizers(cls, **kwargs) -> list[PreTrainedTokenizerBase]:
        return [cls.get_tokenizer(**kwargs)]

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs) -> PreTrainedTokenizer:
        pretrained_name = pretrained_name or cls.tmpdirname
        return cls.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

    def test_tokenizer_single_example(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("LAGVS")
        self.assertListEqual(tokens, ["L", "A", "G", "V", "S"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4, 5, 6, 7, 8])

    def test_tokenizer_encode_single(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq = "LAGVS"
        self.assertListEqual(tokenizer.encode(seq), [0, 4, 5, 6, 7, 8, 2])

    def test_tokenizer_call_no_pad(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ["LAGVS", "WCB"]
        tokens_batch = tokenizer(seq_batch, padding=False)["input_ids"]
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2]])

    def test_tokenizer_call_pad(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        seq_batch = ["LAGVS", "WCB"]
        tokens_batch = tokenizer(seq_batch, padding=True)["input_ids"]
        self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2, 1, 1]])

    def test_tokenize_special_tokens(self):
        """Test `tokenize` with special tokens."""
        tokenizers = self.get_tokenizers(fast=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = "<unk>"
                SPECIAL_TOKEN_2 = "<mask>"

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    def test_add_tokens(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        vocab_size = len(tokenizer)
        self.assertEqual(tokenizer.add_tokens(""), 0)
        self.assertEqual(tokenizer.add_tokens("testoken"), 1)
        self.assertEqual(tokenizer.add_tokens(["testoken1", "testtoken2"]), 2)
        self.assertEqual(len(tokenizer), vocab_size + 3)

        self.assertEqual(tokenizer.add_special_tokens({}), 0)
        self.assertEqual(tokenizer.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
        self.assertRaises(AssertionError, tokenizer.add_special_tokens, {"additional_special_tokens": "<testtoken1>"})
        self.assertEqual(tokenizer.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
        self.assertEqual(
            tokenizer.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
        )
        self.assertIn("<testtoken3>", tokenizer.special_tokens_map["additional_special_tokens"])
        self.assertIsInstance(tokenizer.special_tokens_map["additional_special_tokens"], list)
        self.assertGreaterEqual(len(tokenizer.special_tokens_map["additional_special_tokens"]), 2)
        self.assertEqual(len(tokenizer), vocab_size + 8)
transformers/tests/models/esm/test_tokenization_esm.py/0
{ "file_path": "transformers/tests/models/esm/test_tokenization_esm.py", "repo_id": "transformers", "token_count": 2217 }
590
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import FlaubertConfig, is_sacremoses_available, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import create_sinusoidal_embeddings class FlaubertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_lengths = use_input_lengths self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.vocab_size = vocab_size self.n_special = n_special self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.summary_type = summary_type self.use_proj = use_proj self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) 
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def get_config(self): return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, 
model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) # Doesn't run generation tests. 
Outdated custom `prepare_inputs_for_generation` -- TODO @gante all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() and is_sacremoses_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False # Flaubert has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) # Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->Flaubert def test_flaubert_model_with_sinusoidal_encodings(self): config = FlaubertConfig(sinusoidal_embeddings=True) model = FlaubertModel(config=config) sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.emb_dim), dtype=torch.float32) create_sinusoidal_embeddings(config.max_position_embeddings, config.emb_dim, sinusoidal_pos_embds) self.model_tester.parent.assertTrue(torch.equal(model.position_embeddings.weight, sinusoidal_pos_embds)) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "flaubert/flaubert_small_cased" model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/flaubert/test_modeling_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_modeling_flaubert.py", "repo_id": "transformers", "token_count": 8433 }
591
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import unittest

from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from transformers.testing_utils import slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# using a different tiny model than the one used for default params defined in init to ensure proper testing
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"


class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "stas/tiny-wmt19-en-de"
    tokenizer_class = FSMTTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        cls.langs = ["en", "ru"]
        config = {
            "langs": cls.langs,
            "src_vocab_size": 10,
            "tgt_vocab_size": 20,
        }

        cls.src_vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["src_vocab_file"])
        cls.tgt_vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["tgt_vocab_file"])
        config_file = os.path.join(cls.tmpdirname, "tokenizer_config.json")
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.src_vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(cls.tgt_vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(cls.merges_file, "w") as fp:
            fp.write("\n".join(merges))
        with open(config_file, "w") as fp:
            fp.write(json.dumps(config))

    @cached_property
    def tokenizer_ru_en(self):
        return FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en")

    @cached_property
    def tokenizer_en_ru(self):
        return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")

    def test_online_tokenizer_config(self):
        """this just tests that the online tokenizer files get correctly fetched and
        loaded via its tokenizer_config.json and it's not slow so it's run by normal CI
        """
        tokenizer = FSMTTokenizer.from_pretrained(FSMT_TINY2)
        self.assertListEqual([tokenizer.src_lang, tokenizer.tgt_lang], ["en", "ru"])
        self.assertEqual(tokenizer.src_vocab_size, 21)
        self.assertEqual(tokenizer.tgt_vocab_size, 21)

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_ru_en

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [2]
        assert encoded_pair == text + [2] + text_2 + [2]

    @slow
    def test_match_encode_decode(self):
        tokenizer_enc = self.tokenizer_en_ru
        tokenizer_dec = self.tokenizer_ru_en

        targets = [
            [
                "Here's a little song I wrote. Don't worry, be happy.",
                [2470, 39, 11, 2349, 7222, 70, 5979, 7, 8450, 1050, 13160, 5, 26, 6445, 7, 2],
            ],
            ["This is it. No more. I'm done!", [132, 21, 37, 7, 1434, 86, 7, 70, 6476, 1305, 427, 2]],
        ]

        # if data needs to be recreated or added, run:
        # import torch
        # model = torch.hub.load("pytorch/fairseq", "transformer.wmt19.en-ru", checkpoint_file="model4.pt", tokenizer="moses", bpe="fastbpe")
        # for src_text, _ in targets: print(f"""[\n"{src_text}",\n {model.encode(src_text).tolist()}\n],""")

        for src_text, tgt_input_ids in targets:
            encoded_ids = tokenizer_enc.encode(src_text, return_tensors=None)
            self.assertListEqual(encoded_ids, tgt_input_ids)

            # and decode backward, using the reversed languages model
            decoded_text = tokenizer_dec.decode(encoded_ids, skip_special_tokens=True)
            self.assertEqual(decoded_text, src_text)

    @slow
    def test_tokenizer_lower(self):
        tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-ru-en", do_lower_case=True)
        tokens = tokenizer.tokenize("USA is United States of America")
        expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
        self.assertListEqual(tokens, expected)

    @unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    @unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
    def test_np_encode_plus_sent_to_model(self):
        pass
transformers/tests/models/fsmt/test_tokenization_fsmt.py/0
{ "file_path": "transformers/tests/models/fsmt/test_tokenization_fsmt.py", "repo_id": "transformers", "token_count": 2989 }
592
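The FSMT tokenizer test above builds its BPE fixture from tiny src/tgt vocab JSON files plus a merges file. The stand-alone sketch below mirrors that setup outside the unittest harness to show how such a fixture drives FSMTTokenizer; the temporary directory and the final assert are illustrative only, and sacremoses must be installed for the Moses pre-tokenization step.

import json
import os
import tempfile

from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer

# Minimal BPE fixture, copied from the test above.
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
         "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

tmpdir = tempfile.mkdtemp()
src_vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["src_vocab_file"])
tgt_vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["tgt_vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
with open(src_vocab_file, "w") as fp:
    json.dump(vocab_tokens, fp)
with open(tgt_vocab_file, "w") as fp:
    json.dump(vocab_tokens, fp)
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))

# Same instantiation and expectation as test_full_tokenizer above.
tokenizer = FSMTTokenizer(["en", "ru"], src_vocab_file, tgt_vocab_file, merges_file)
assert tokenizer.tokenize("lower") == ["low", "er</w>"]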
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import shutil import tempfile import unittest import numpy as np from transformers import AutoProcessor from transformers.testing_utils import require_av, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import Glm4vProcessor if is_torch_available(): import torch @require_vision @require_torch class Glm4vProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Glm4vProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() processor = Glm4vProcessor.from_pretrained( "THUDM/GLM-4.1V-9B-Thinking", patch_size=4, size={"shortest_edge": 12 * 12, "longest_edge": 18 * 18} ) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.image_token def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def get_video_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor def get_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs) @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) @require_torch @require_av def _test_apply_chat_template( self, modality: str, batch_size: int, return_tensors: str, input_name: str, processor_name: str, input_data: list[str], ): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if processor_name not in self.processor_class.attributes: self.skipTest(f"{processor_name} attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = processor.tokenizer( formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens ) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, 
add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, return_tensors=return_tensors, max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, ) self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(input_data[:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors=return_tensors, fps=2, # by default no more than 2 frames per second, otherwise too slow ) input_name = getattr(self, input_name) self.assertTrue(input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) if modality == "video": # qwen pixels don't scale with bs same way as other models, calculate expected video token count based on video_grid_thw expected_video_token_count = 0 for thw in out_dict["video_grid_thw"]: expected_video_token_count += thw[0] * thw[1] * thw[2] mm_len = expected_video_token_count else: mm_len = batch_size * 4 self.assertEqual(len(out_dict[input_name]), mm_len) return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list} for k in out_dict: self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors]) @require_av @unittest.skip("GLM4V can't sample frames from image frames") def test_apply_chat_template_video_1(self): pass @require_av @unittest.skip("GLM4V can't sample frames from image frames") def test_apply_chat_template_video_2(self): pass @require_av def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ {"type": "video"}, {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), 1) formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"]) # Add video URL for return dict and load with `num_frames` arg messages[0][0]["content"][0] = { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4", } # Load with `video_fps` 
arg video_fps = 1 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, video_fps=video_fps, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 40) # Load without any arg should load the whole video out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, do_sample_frames=False, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 600) # Load video as a list of frames (i.e. images). NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][0] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, do_sample_frames=False, ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 4)
transformers/tests/models/glm4v/test_processor_glm4v.py/0
{ "file_path": "transformers/tests/models/glm4v/test_processor_glm4v.py", "repo_id": "transformers", "token_count": 4441 }
593
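The GLM4V processor test above centres on apply_chat_template with tokenize=True and return_dict=True. Below is a hedged, stand-alone sketch of that call path for a single image message; the checkpoint and image URL are taken from the test fixtures, and downloading them is an assumption about the local environment.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this."},
            {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
        ],
    }
]

# tokenize=True turns the rendered template into model inputs; return_dict=True adds
# the image features alongside input_ids and attention_mask.
inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
)
print(sorted(inputs.keys()))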
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the IBM Granite Speech model.""" import tempfile import unittest import pytest from transformers import ( AutoProcessor, GraniteSpeechConfig, GraniteSpeechForConditionalGeneration, ) from transformers.testing_utils import ( cleanup, require_torch, slow, torch_device, ) from transformers.utils import ( is_datasets_available, is_peft_available, is_torch_available, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset class GraniteSpeechForConditionalGenerationModelTester: def __init__( self, parent, seq_length=7, encoder_config={ "model_type": "granite_speech_encoder", "context_size": 200, "conv_expansion_factor": 2, "conv_kernel_size": 15, "dim_head": 32, "dropout": 0.1, "feedforward_mult": 4, "hidden_dim": 32, "input_dim": 160, "num_heads": 4, "num_layers": 2, "output_dim": 42, }, text_config={ "model_type": "granite", "is_training": True, "seq_length": 7, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 580, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, projector_config={ "attention_probs_dropout_prob": 0.1, "cross_attention_frequency": 1, "encoder_hidden_size": 32, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 32, "initializer_range": 0.02, "intermediate_size": 256, "layer_norm_eps": 1e-12, "max_position_embeddings": 2048, "model_type": "blip_2_qformer", "num_attention_heads": 4, "num_hidden_layers": 2, "position_embedding_type": "absolute", "use_qformer_text_input": False, "vocab_size": 30522, }, audio_token_index=0, tie_word_embeddings=True, initializer_range=0.02, has_lora_adapter=True, downsample_rate=5, window_size=15, is_training=True, ): self.parent = parent self.encoder_config = encoder_config self.text_config = text_config self.projector_config = projector_config self.audio_token_index = audio_token_index self.tie_word_embeddings = tie_word_embeddings self.initializer_range = initializer_range self.has_lora_adapater = has_lora_adapter self.downsample_rate = downsample_rate self.window_size = window_size self.is_training = is_training # Dims for audio features self.sequence_dim = 844 self.feature_dim = 160 self.num_attention_heads = text_config["num_attention_heads"] self.num_hidden_layers = text_config["num_hidden_layers"] self.hidden_size = text_config["hidden_size"] self.batch_size = 3 self.pad_token_id = text_config["pad_token_id"] self.seq_len = 7 
self.num_audio_tokens = 2 self.seq_length = seq_length + self.num_audio_tokens def get_config(self): return GraniteSpeechConfig( encoder_config=self.encoder_config, text_config=self.text_config, projector_config=self.projector_config, audio_token_index=self.audio_token_index, tie_word_embeddings=self.tie_word_embeddings, initializer_range=self.initializer_range, has_lora_adapter=self.has_lora_adapater, ) def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.sequence_dim, self.feature_dim], ) config = self.get_config() return config, input_features def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_features = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) input_ids[input_ids == config.audio_token_index] = self.pad_token_id input_ids[:, : self.num_audio_tokens] = config.audio_token_index inputs_dict = { "input_features": input_features, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def create_and_check_granite_speech_model_fp16_forward(self, config, input_ids, input_features, attention_mask): model = GraniteSpeechForConditionalGeneration(config=config) model.to(torch_device) model.half() model.eval() logits = model( input_ids=input_ids, attention_mask=attention_mask, input_features=input_features, return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) def create_and_check_granite_speech_model_fp16_autocast_forward( self, config, input_ids, input_features, attention_mask, ): config.dtype = torch.float16 model = GraniteSpeechForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, input_features=input_features.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class GraniteSpeechForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `GraniteSpeechForConditionalGeneration`. 
""" all_model_classes = (GraniteSpeechForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False _is_composite = True def setUp(self): self.model_tester = GraniteSpeechForConditionalGenerationModelTester(self) self.config_tester = ConfigTester( self, config_class=GraniteSpeechConfig, has_text_modality=False, ) def test_inputs_embeds(self): # overwrite inputs_embeds tests because we need to delete "input features" for the audio model config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["input_features"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if name == "projector.query": continue elif param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_sdpa_can_dispatch_composite_models(self): # overwrite because Granite Speech is audio+text model (not vision+text) if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: # NOTE - currently we only enable alternate attention implementations on # the encapsulated LLM; in the future, this should be added for the conformer # encoder as well. 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) text_attn = "sdpa" if model.language_model._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model.language_model.config._attn_implementation == text_attn) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.language_model.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") @pytest.mark.generate @slow @unittest.skip(reason="Granite Speech doesn't support SDPA for all backbones") def test_eager_matches_sdpa_generate(self): pass class GraniteSpeechForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.model_path = "ibm-granite/granite-speech-3.3-2b" self.processor = AutoProcessor.from_pretrained(self.model_path) self.prompt = self._get_prompt(self.processor.tokenizer) def tearDown(self): cleanup(torch_device, gc_collect=True) def _get_prompt(self, tokenizer): chat = [ { "role": "system", "content": "Knowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. You are a helpful AI assistant", }, { "role": "user", "content": "<|audio|>can you transcribe the speech into a written format?", }, ] return tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow @pytest.mark.skipif(not is_peft_available(), reason="Outputs diverge without lora") def test_small_model_integration_test_single(self): model = GraniteSpeechForConditionalGeneration.from_pretrained(self.model_path).to(torch_device) input_speech = self._load_datasamples(1) # Verify feature sizes; note that the feature mask refers to the size of # features that are masked into the LLM, not the output of the processor, # which is why we inspect the mask instead of the `num_features` tensor. inputs = self.processor(self.prompt, input_speech, return_tensors="pt").to(torch_device) num_computed_features = self.processor.audio_processor._get_num_audio_features( [speech_arr.shape[-1] for speech_arr in input_speech], )[0] num_actual_features = torch.sum(inputs["input_features_mask"]).item() assert num_actual_features == num_computed_features # verify generation output = model.generate(**inputs, max_new_tokens=32) EXPECTED_DECODED_TEXT = "systemKnowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. 
You are a helpful AI assistant\nusercan you transcribe the speech into a written format?\nassistantmister quilter is the apostle of the middle classes and we are glad to welcome his gospel" # fmt: skip self.assertEqual( self.processor.tokenizer.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @pytest.mark.skipif(not is_peft_available(), reason="Outputs diverge without lora") def test_small_model_integration_test_batch(self): model = GraniteSpeechForConditionalGeneration.from_pretrained(self.model_path).to(torch_device) input_speech = self._load_datasamples(2) prompts = [self.prompt, self.prompt] # Verify feature sizes & padding inputs = self.processor(prompts, input_speech, return_tensors="pt").to(model.device) num_computed_features = self.processor.audio_processor._get_num_audio_features( [speech_arr.shape[-1] for speech_arr in input_speech], ) num_actual_features = torch.sum(inputs["input_features_mask"], dim=-1) for e_feats, a_feats in zip(num_computed_features, num_actual_features): assert e_feats == a_feats.item() # verify generation output = model.generate(**inputs, max_new_tokens=32) EXPECTED_DECODED_TEXT = [ "systemKnowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. You are a helpful AI assistant\nusercan you transcribe the speech into a written format?\nassistantmister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "systemKnowledge Cutoff Date: April 2024.\nToday's Date: December 19, 2024.\nYou are Granite, developed by IBM. You are a helpful AI assistant\nusercan you transcribe the speech into a written format?\nassistantnor is mister quilter's manner less interesting than his matter" ] # fmt: skip self.assertEqual( self.processor.tokenizer.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
transformers/tests/models/granite_speech/test_modeling_granite_speech.py/0
{ "file_path": "transformers/tests/models/granite_speech/test_modeling_granite_speech.py", "repo_id": "transformers", "token_count": 7157 }
594
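The Granite Speech integration tests above pair a chat prompt containing the <|audio|> token with raw waveforms. The sketch below follows the same single-sample path outside unittest; it assumes the ibm-granite checkpoint, peft and the librispeech dummy dataset are available, and it omits the system turn used in the test for brevity.

from datasets import load_dataset
from transformers import AutoProcessor, GraniteSpeechForConditionalGeneration

model_path = "ibm-granite/granite-speech-3.3-2b"
processor = AutoProcessor.from_pretrained(model_path)
model = GraniteSpeechForConditionalGeneration.from_pretrained(model_path)

# One waveform from the dummy librispeech split, as in _load_datasamples(1) above.
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = ds.sort("id")[:1]["audio"][0]["array"]

chat = [{"role": "user", "content": "<|audio|>can you transcribe the speech into a written format?"}]
prompt = processor.tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

inputs = processor(prompt, [audio], return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(processor.tokenizer.decode(output[0], skip_special_tokens=True))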
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Idefics model.""" import inspect import unittest import pytest from parameterized import parameterized from transformers import BitsAndBytesConfig, IdeficsConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( TestCasePlus, require_bitsandbytes, require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import IdeficsForVisionText2Text, IdeficsModel, IdeficsProcessor from transformers.models.idefics.configuration_idefics import IdeficsPerceiverConfig, IdeficsVisionConfig if is_vision_available(): from PIL import Image class IdeficsModelTester: def __init__( self, parent, batch_size=1, seq_length=7, image_size=30, patch_size=2, num_channels=3, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, alpha_initializer="ones", num_labels=3, scope=None, modality_type_vocab_size=2, vision_embed_dim=32, vision_patch_size=2, vision_image_size=30, vision_num_attention_heads=4, vision_num_hidden_layers=5, vision_intermediate_size=37, perceiver_qk_layer_norms_perceiver=False, perceiver_resampler_depth=2, perceiver_resampler_head_dim=8, perceiver_resampler_n_heads=2, perceiver_resampler_n_latents=16, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.alpha_initializer = alpha_initializer self.num_labels = num_labels self.scope = scope self.modality_type_vocab_size = modality_type_vocab_size self.vision_embed_dim = vision_embed_dim 
self.vision_patch_size = vision_patch_size self.vision_image_size = vision_image_size self.vision_num_attention_heads = vision_num_attention_heads self.vision_num_hidden_layers = vision_num_hidden_layers self.vision_intermediate_size = vision_intermediate_size self.vision_config = IdeficsVisionConfig( embed_dim=self.vision_embed_dim, patch_size=self.vision_patch_size, image_size=self.vision_image_size, num_attention_heads=self.vision_num_attention_heads, num_hidden_layers=self.vision_num_hidden_layers, intermediate_size=self.vision_intermediate_size, ).to_dict() self.perceiver_qk_layer_norms_perceiver = perceiver_qk_layer_norms_perceiver self.perceiver_resampler_depth = perceiver_resampler_depth self.perceiver_resampler_head_dim = perceiver_resampler_head_dim self.perceiver_resampler_n_heads = perceiver_resampler_n_heads self.perceiver_resampler_n_latents = perceiver_resampler_n_latents self.perceiver_config = IdeficsPerceiverConfig( qk_layer_norms_perceiver=self.perceiver_qk_layer_norms_perceiver, resampler_depth=self.perceiver_resampler_depth, resampler_head_dim=self.perceiver_resampler_head_dim, resampler_n_heads=self.perceiver_resampler_n_heads, resampler_n_latents=self.perceiver_resampler_n_latents, ) # we set the expected sequence length (which is used in several tests) # this is equal to the seq length of the text tokens + number of image patches + 1 for the CLS token self.expected_seq_len = self.seq_length + (self.image_size // self.patch_size) ** 2 + 1 def prepare_config_and_inputs(self, num_images=1, interpolate_pos_encoding=False, image_expansion=0): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) pixel_values = floats_tensor( [ self.batch_size, num_images, self.num_channels, self.image_size + image_expansion, self.image_size + image_expansion, ] ) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) image_attention_mask = random_attention_mask([self.batch_size, self.seq_length, num_images]) config = self.get_config() return (config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding) def prepare_config_and_inputs_gate_tests(self): # Create a list of configs and inputs, to test 2 things: # 1. For the same image, the output should be different when image_attention_mask is filled with 0s vs filled with 1s. # 2. For 2 different images, the output should be the same when image_attention_mask is filled with 0s. 
interpolate_pos_encoding = False input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) pixel_values = floats_tensor( [ self.batch_size, 1, self.num_channels, self.image_size, self.image_size, ] ) pixel_values_list = [ pixel_values.clone(), pixel_values.clone(), pixel_values.clone().fill_(0.6), pixel_values.clone().fill_(0.3), ] attention_mask = None if self.use_input_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) image_attention_mask = random_attention_mask([self.batch_size, self.seq_length, 1]) image_attention_mask_list = [ image_attention_mask.clone().fill_(0), image_attention_mask.clone().fill_(1), image_attention_mask.clone().fill_(0), image_attention_mask.clone().fill_(0), ] config = self.get_config() inputs_list = [] for pixel_values, image_attention_mask in zip(pixel_values_list, image_attention_mask_list): inputs_list.append( { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "image_attention_mask": image_attention_mask, "interpolate_pos_encoding": interpolate_pos_encoding, } ) inputs_w_same_img = inputs_list[:2] inputs_w_0_img_attn = inputs_list[2:] return config, inputs_w_same_img, inputs_w_0_img_attn def get_config(self): return IdeficsConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, alpha_initializer=self.alpha_initializer, num_labels=self.num_labels, modality_type_vocab_size=self.modality_type_vocab_size, vision_config=self.vision_config, ) def create_and_check_model( self, config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding, ): model = IdeficsModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, pixel_values=pixel_values, image_attention_mask=image_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, input_ids.shape[1], self.hidden_size) ) def create_and_check_model_gen( self, config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding, ): model = IdeficsForVisionText2Text(config) model.to(torch_device) model.eval() model.generate( input_ids, attention_mask=input_mask, pixel_values=pixel_values, image_attention_mask=image_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, max_length=self.seq_length + 2, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, pixel_values, image_attention_mask, interpolate_pos_encoding, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": input_mask, "pixel_values": pixel_values, "image_attention_mask": image_attention_mask, "interpolate_pos_encoding": interpolate_pos_encoding, } return config, inputs_dict def prepare_pixel_values(self): return floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) @require_torch class IdeficsModelTest(ModelTesterMixin, 
PipelineTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (IdeficsModel, IdeficsForVisionText2Text) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": IdeficsModel, "image-text-to-text": IdeficsForVisionText2Text} if is_torch_available() else {} ) test_pruning = False test_headmasking = False test_torchscript = False has_attentions = False # only supports SDOA and thus no attention probs returned def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) # XXX: IdeficsForVisionText2TextTest has no MODEL_FOR group yet, but it should be the same # as MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, so for now manually changing to do the right thing # as super won't do it if return_labels: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @unittest.skip("Idefics requires both text and image inputs which is currently not done in this test.") def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): pass def test_model_outputs_equivalence(self): try: orig = self.all_model_classes # IdeficsModel.forward doesn't have labels input arg - only IdeficsForVisionText2Text does self.all_model_classes = (IdeficsForVisionText2Text,) if is_torch_available() else () super().test_model_outputs_equivalence() finally: self.all_model_classes = orig def setUp(self): self.model_tester = IdeficsModelTester(self) self.config_tester = ConfigTester(self, config_class=IdeficsConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_single_image(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=1, interpolate_pos_encoding=False, image_expansion=0 ) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_multiple_images(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=2, interpolate_pos_encoding=False, image_expansion=0 ) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_image_pos_embeddings_interpolation_single_image(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=1, interpolate_pos_encoding=True, image_expansion=2 ) self.model_tester.create_and_check_model(*config_and_inputs) config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=1, interpolate_pos_encoding=True, image_expansion=0 ) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_image_pos_embeddings_interpolation_multiple_images(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=2, interpolate_pos_encoding=True, image_expansion=2 ) self.model_tester.create_and_check_model(*config_and_inputs) config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=2, interpolate_pos_encoding=True, image_expansion=0 ) self.model_tester.create_and_check_model(*config_and_inputs) def test_generate_with_image_pos_embeddings_interpolation_single_image(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=1, interpolate_pos_encoding=True, image_expansion=2 ) self.model_tester.create_and_check_model_gen(*config_and_inputs) def 
test_generate_with_image_pos_embeddings_interpolation_multiple_images(self): config_and_inputs = self.model_tester.prepare_config_and_inputs( num_images=2, interpolate_pos_encoding=True, image_expansion=2 ) self.model_tester.create_and_check_model_gen(*config_and_inputs) def test_cross_attention_gates(self): config, inputs_w_same_img, inputs_w_0_img_attn = self.model_tester.prepare_config_and_inputs_gate_tests() model = IdeficsModel(config=config).to(torch_device) model.eval() test_1_results = [] for inputs in inputs_w_same_img: with torch.no_grad(): last_hidden_states = model(**inputs).last_hidden_state last_hidden_states = model(**inputs).last_hidden_state test_1_results.append(last_hidden_states) self.assertNotEqual(test_1_results[0].sum().item(), test_1_results[1].sum().item()) test_2_results = [] for inputs in inputs_w_0_img_attn: with torch.no_grad(): last_hidden_states = model(**inputs).last_hidden_state test_2_results.append(last_hidden_states) self.assertEqual(test_2_results[0].sum().item(), test_2_results[1].sum().item()) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes: # IdeficsModel does not support training, users should use # IdeficsForVisionText2Text for this purpose if model_class == IdeficsModel: self.skipTest(reason="IdeficsModel does not support training") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes: # IdeficsModel does not support training, users should use # IdeficsForVisionText2Text for this purpose if model_class == IdeficsModel: self.skipTest(reason="IdeficsModel does not support training") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="""IDEFICS does not support retaining the gradients of the hidden states and attention""") def test_retain_grad_hidden_states_attentions(self): return @pytest.mark.generate @unittest.skip(reason="""IDEFICS cannot generate with no images provided!""") def test_generate_without_input_ids(self): pass @pytest.mark.generate @unittest.skip(reason="""IDEFICS cannot generate with no images provided!""") def test_generate_continue_from_inputs_embeds(self): pass @pytest.mark.generate @unittest.skip(reason="""IDEFICS cannot do contrastive generation yet and it is not worth fixing""") def 
test_contrastive_generate(self): pass @pytest.mark.generate @unittest.skip(reason="""IDEFICS cannot do contrastive generation yet and it is not worth fixing""") def test_contrastive_generate_low_memory(self): pass @pytest.mark.generate @unittest.skip(reason="""IDEFICS cannot do contrastive generation yet and it is not worth fixing""") def test_contrastive_generate_dict_outputs_use_cache(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions # IDEFICS does not support outputting attention score because it uses SDPA under the hood self.assertTrue(attentions[0] is None) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) # IDEFICS does not support outputting attention score because it uses SDPA under the hood self.assertTrue(self_attentions[0] is None) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): model_name = "HuggingFaceM4/idefics-9b" model = IdeficsModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip("Idefics has a hard requirement on SDPA") def test_sdpa_can_dispatch_non_composite_models(self): pass @unittest.skip(reason="Idefics can't do text-only 
inference") def test_generate_from_random_inputs_embeds( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): pass @require_torch class IdeficsForVisionText2TextTest(IdeficsModelTest, GenerationTesterMixin, unittest.TestCase): all_model_classes = (IdeficsForVisionText2Text,) if is_torch_available() else () def setUp(self): self.model_tester = IdeficsModelTester( self, modality_type_vocab_size=3, ) self.config_tester = ConfigTester(self, config_class=IdeficsConfig, hidden_size=37) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @unittest.skip("Idefics requires both text and image inputs which is currently not done in this test.") def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): pass @pytest.mark.generate def test_left_padding_compatibility(self): """Overwrite because IDEFICS needs image attention mask to be also padded""" # NOTE: left-padding results in small numerical differences. This is expected. # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 def _prepare_model_kwargs(input_ids, attention_mask, image_attention_mask, signature): model_kwargs = { "input_ids": input_ids, "attention_mask": attention_mask, "image_attention_mask": image_attention_mask, } if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids if "cache_position" in signature: cache_position = torch.arange(input_ids.shape[-1], device=torch_device) model_kwargs["cache_position"] = cache_position return model_kwargs for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() input_ids = inputs_dict.pop("input_ids") attention_mask = inputs_dict.pop("attention_mask") if attention_mask is None: attention_mask = torch.ones_like(input_ids) image_attention_mask = inputs_dict.pop("image_attention_mask", None) model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() # no cache as some models require special cache classes to be init outside forward model.generation_config.use_cache = False # Without padding model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, image_attention_mask, signature) next_logits_wo_padding = model(**model_kwargs, **inputs_dict).logits[:, -1, :] # With left-padding (length 32) # can hardcode pad_token to be 0 as we'll do attn masking anyway pad_token_id = ( config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0 ) pad_size = (input_ids.shape[0], 32) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) pad_size_img = (input_ids.shape[0], 32, image_attention_mask.shape[-1]) extra_img_mask = torch.zeros(pad_size_img, dtype=image_attention_mask.dtype, device=torch_device) padded_image_attention_mask = torch.cat([extra_img_mask, image_attention_mask], dim=1) model_kwargs = _prepare_model_kwargs( padded_input_ids, padded_attention_mask, padded_image_attention_mask, signature ) next_logits_with_padding = model(**model_kwargs, **inputs_dict).logits[:, -1, :] # They should result in very similar logits torch.testing.assert_close(next_logits_wo_padding, 
next_logits_with_padding, rtol=1e-5, atol=1e-5) @pytest.mark.generate def test_generate_continue_from_past_key_values(self): """Overwrite because IDEFICS needs image attention mask to be also processed""" # Tests that we can continue generating from past key values, returned from a previous `generate` call for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() # Let's make it always: # 1. use cache (for obvious reasons) # 2. generate to max length (which can be achieved by setting the eos token to an invalid value), which # would make the test flaky (e.g. EOS is generated on iteration 1 on both generations, but the # continuation would force it to generate beyond an EOS token) # 3. ignore `token_type_ids` for simplicity # 4. ignore `forced_eos_token_id`, which requires further manipulation of the continuation inputs and is # active by default on some models # 5. ignore `encoder_no_repeat_ngram_size`, which is set by default in some encoder-decoder models. When # we use their decoder as a stand-alone model, `encoder_no_repeat_ngram_size` actually prevents # repetition exclusively from the prompt. This test relies on comparing one call vs 2 calls # with cache, what is considered a prompt is different in the two cases. model = model_class(config).to(torch_device) model.eval() model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None model.generation_config.encoder_no_repeat_ngram_size = 0 model.generation_config.use_cache = True # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, do_sample=False, max_new_tokens=4, return_dict_in_generate=True) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). 
outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=3, return_dict_in_generate=True) # Continue from the tokens generated above, preparing the inputs accordingly inputs["past_key_values"] = outputs_cached.past_key_values new_attention_len = outputs_cached.sequences.shape[-1] inputs["input_ids"] = outputs_cached.sequences if "attention_mask" in inputs: inputs["attention_mask"] = torch.nn.functional.pad( inputs["attention_mask"], (0, new_attention_len - inputs["attention_mask"].shape[1]), mode="constant", value=1, ) if "image_attention_mask" in inputs: inputs["image_attention_mask"] = inputs["image_attention_mask"][:, -1:, :] outputs_cached = model.generate(**inputs, do_sample=False, max_new_tokens=1, return_dict_in_generate=True) # The two sets of generated text and past kv should be equal to each other self.assertListEqual(outputs.sequences.tolist(), outputs_cached.sequences.tolist()) for layer_idx in range(len(outputs_cached.past_key_values)): for kv_idx in range(len(outputs_cached.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], outputs_cached.past_key_values[layer_idx][kv_idx], ) ) @pytest.mark.generate def test_generate_without_input_ids(self): """Overwrite because IDEFICS needs image attention mask to be also processed and requires image at input always.""" config, input_dict = self.prepare_config_and_inputs_for_generate() pixel_values = input_dict["pixel_values"] image_attention_mask = input_dict["image_attention_mask"][:, -1:, :] # hack in case they are equal, otherwise the attn mask will be [0] if config.bos_token_id == config.pad_token_id: config.pad_token_id = None for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate( pixel_values=pixel_values, image_attention_mask=image_attention_mask, do_sample=False, max_new_tokens=self.max_new_tokens, remove_invalid_values=True, ) self.assertIsNotNone(output_ids_generate) @pytest.mark.generate def test_generate_continue_from_inputs_embeds(self): """Overwrite for IDEFICS: Ensure image attention mask is processed while continuing from `inputs_embeds`.""" for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() print(inputs) model = model_class(config).to(torch_device).eval() model.generation_config.pad_token_id = model.generation_config.eos_token_id = -1 model.generation_config.forced_eos_token_id = None model.generation_config.use_cache = True input_ids = inputs.pop("input_ids") input_embeds = model.get_input_embeddings()(input_ids) generation_kwargs = { "return_dict_in_generate": True, "do_sample": False, } inputs["inputs_embeds"] = input_embeds # Traditional way of generating text, with `return_dict_in_generate` to return the past key values outputs = model.generate(**inputs, max_new_tokens=4, **generation_kwargs) # Let's generate again, but passing the past key values in between (3 + 1 = 4 tokens). Note that the # inputs may need to be tweaked across `generate` calls (like the attention mask). 
initial_output = model.generate(**inputs, max_new_tokens=3, **generation_kwargs) inputs["past_key_values"] = initial_output.past_key_values new_attention_len = input_ids.shape[1] + initial_output.sequences.shape[-1] continued_embeds = torch.cat([input_embeds, model.get_input_embeddings()(initial_output.sequences)], dim=1) inputs["inputs_embeds"] = continued_embeds if "attention_mask" in inputs: inputs["attention_mask"] = torch.nn.functional.pad( inputs["attention_mask"], (0, new_attention_len - inputs["attention_mask"].shape[1]), mode="constant", value=1, ) if "image_attention_mask" in inputs: inputs["image_attention_mask"] = inputs["image_attention_mask"][..., -1:, :] cached_output = model.generate(**inputs, max_new_tokens=1, **generation_kwargs) # Verify that the combined outputs match the full generation. combined_output_sequences = torch.concat([initial_output.sequences, cached_output.sequences], axis=1) self.assertListEqual(outputs.sequences.tolist(), combined_output_sequences.tolist()) for layer_idx in range(len(cached_output.past_key_values)): for kv_idx in range(len(cached_output.past_key_values[layer_idx])): self.assertTrue( torch.allclose( outputs.past_key_values[layer_idx][kv_idx], cached_output.past_key_values[layer_idx][kv_idx], ) ) def _check_attentions_for_generate( self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values ): """ Overwrite from generation tests because Idefics has only SDPA layers. Do not skip because we still want generation tests to run. Rather we can remove checks for shape. """ pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate(self): pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate_low_memory(self): pass @unittest.skip(reason="We only test the model that takes in multiple images") def test_custom_4d_attention_mask(self): pass @unittest.skip(reason="IDEFICS cannot compile due to dynamic control flow when checking inputs") def test_generate_with_static_cache(self): pass @unittest.skip(reason="We only test the model that takes in multiple images") def test_model(self): pass @unittest.skip(reason="We only test the model that takes in multiple images") def test_for_token_classification(self): pass @unittest.skip(reason="""IDEFICS does not support retaining the gradients of the hidden states and attention""") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip("Idefics has a hard requirement on SDPA") def test_sdpa_can_dispatch_non_composite_models(self): pass @unittest.skip( "Idefics has a separate test runner for generation tests with complex inheritance, causing this check to fail" ) def test_generation_tester_mixin_inheritance(self): pass @unittest.skip(reason="Idefics can't do text-only inference") def test_generate_from_random_inputs_embeds( 
self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels ): pass @require_torch @require_vision class IdeficsModelIntegrationTest(TestCasePlus): @cached_property def default_processor(self): return ( IdeficsProcessor.from_pretrained("HuggingFaceM4/idefics-9b", revision="refs/pr/11") if is_vision_available() else None ) @require_bitsandbytes @slow def test_inference_natural_language_visual_reasoning(self): cat_image_path = self.tests_dir / "fixtures/tests_samples/COCO/000000039769.png" cats_image_obj = Image.open(cat_image_path) # 2 cats dogs_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg" prompts = [ [ "User:", dogs_image_url, "Describe this image.\nAssistant: An image of two dogs.\n", "User:", cats_image_obj, "Describe this image.\nAssistant:", ], [ "User:", cats_image_obj, "Describe this image.\nAssistant: An image of two kittens.\n", "User:", dogs_image_url, "Describe this image.\nAssistant:", ], ] # the CI gpu is small so using quantization to fit quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype="float16", ) model = IdeficsForVisionText2Text.from_pretrained( "HuggingFaceM4/idefics-9b", quantization_config=quantization_config, device_map="auto" ) processor = self.default_processor inputs = processor(text=prompts, return_tensors="pt", padding="longest").to(torch_device) generated_ids = model.generate(**inputs, max_length=100) generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True) # keep for debugging for i, t in enumerate(generated_text): t = bytes(t, "utf-8").decode("unicode_escape") print(f"{i}:\n{t}\n") self.assertIn("image of two cats", generated_text[0]) self.assertIn("image of two dogs", generated_text[1])
transformers/tests/models/idefics/test_modeling_idefics.py/0
{ "file_path": "transformers/tests/models/idefics/test_modeling_idefics.py", "repo_id": "transformers", "token_count": 18576 }
595
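The IDEFICS integration test above interleaves plain strings, image URLs and PIL images inside a single prompt list. A reduced sketch of that calling convention follows; the 9b checkpoint is large, so loading it (and the device_map="auto" placement) is an assumption about the local setup rather than part of the test, which instead uses 4-bit quantization.

from transformers import IdeficsForVisionText2Text, IdeficsProcessor

checkpoint = "HuggingFaceM4/idefics-9b"
processor = IdeficsProcessor.from_pretrained(checkpoint, revision="refs/pr/11")
model = IdeficsForVisionText2Text.from_pretrained(checkpoint, device_map="auto")

# A prompt is a list mixing text segments and images (URLs or PIL images).
prompts = [
    [
        "User:",
        "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg",
        "Describe this image.\nAssistant:",
    ]
]

inputs = processor(text=prompts, return_tensors="pt", padding="longest").to(model.device)
generated_ids = model.generate(**inputs, max_length=100)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])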
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Informer model.""" import inspect import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from transformers.utils import check_torch_load_is_safe from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import InformerConfig, InformerForPrediction, InformerModel from transformers.models.informer.modeling_informer import ( InformerDecoder, InformerEncoder, InformerSinusoidalPositionalEmbedding, ) @require_torch class InformerModelTester: def __init__( self, parent, batch_size=13, prediction_length=7, context_length=14, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], sampling_factor=10, distil=False, ): self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = min( sampling_factor * np.ceil(np.log1p(context_length)).astype("int").item(), context_length ) self.decoder_seq_length = min( sampling_factor * np.ceil(np.log1p(prediction_length)).astype("int").item(), prediction_length ) self.sampling_factor = sampling_factor self.distil = distil def get_config(self): return InformerConfig( prediction_length=self.prediction_length, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, context_length=self.context_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, num_static_real_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], sampling_factor=self.sampling_factor, distil=self.distil, ) def 
prepare_informer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) static_real_features = floats_tensor([self.batch_size, 1]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 # decoder inputs future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": static_categorical_features, "static_real_features": static_real_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_informer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = InformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = InformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, _, _, _ = model.create_network_inputs(**inputs_dict) enc_input = transformer_inputs[:, : config.context_length, ...] dec_input = transformer_inputs[:, config.context_length :, ...] 
encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) embed_positions = InformerSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) embed_positions._init_weight() embed_positions = embed_positions.to(torch_device) self.parent.assertTrue(torch.equal(model.encoder.embed_positions.weight, embed_positions.weight)) self.parent.assertTrue(torch.equal(model.decoder.embed_positions.weight, embed_positions.weight)) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = InformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class InformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (InformerModel, InformerForPrediction) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": InformerModel} if is_torch_available() else {} is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False def setUp(self): self.model_tester = InformerModelTester(self) self.config_tester = ConfigTester( self, config_class=InformerConfig, has_text_modality=False, prediction_length=self.model_tester.prediction_length, ) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.context_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "prediction_length", seq_len) self.assertListEqual( 
list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="Informer does not have tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip def test_model_outputs_equivalence(self): pass @unittest.skip def test_determinism(self): pass @unittest.skip(reason="randomly selects U keys while calculating attentions") def test_batching_equivalence(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # # Input is 'static_categorical_features' not 'input_ids' def test_model_main_input_name(self): model_signature = inspect.signature(getattr(InformerModel, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(InformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] expected_arg_names.extend( [ "future_observed_mask", "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] if "future_observed_mask" in arg_names else [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) context_length = getattr(self.model_tester, "context_length", seq_len) prediction_length = getattr(self.model_tester, "prediction_length", seq_len) for model_class in self.all_model_classes: 
inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, context_length], ) out_len = len(outputs) correct_outlen = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, prediction_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_seq_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 2, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, context_length], ) @is_flaky() def test_retain_grad_hidden_states_attentions(self): super().test_retain_grad_hidden_states_attentions() @unittest.skip(reason="Model does not have input embeddings") def test_model_get_set_embeddings(self): pass def prepare_batch(filename="train-batch.pt"): file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset") check_torch_load_is_safe() batch = torch.load(file, map_location=torch_device, weights_only=True) return batch @require_torch @slow class InformerModelIntegrationTests(unittest.TestCase): def test_inference_no_head(self): model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch() torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], 
past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], ).last_hidden_state expected_shape = torch.Size((64, model.config.context_length, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.4699, 0.7295, 0.8967], [0.4858, 0.3810, 0.9641], [-0.0233, 0.3608, 1.0303]], device=torch_device, ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_inference_head(self): model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") torch.manual_seed(0) with torch.no_grad(): output = model( past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_time_features=batch["future_time_features"], ).encoder_last_hidden_state # encoder distils the context length to 1/8th of the original length expected_shape = torch.Size((64, model.config.context_length // 8, model.config.d_model)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.4170, 0.9067, 0.8153], [0.3004, 0.7574, 0.7066], [0.6803, -0.6323, 1.2802]], device=torch_device ) torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) def test_seq_to_seq_generation(self): model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device) batch = prepare_batch("val-batch.pt") torch.manual_seed(0) with torch.no_grad(): outputs = model.generate( static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], ) expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) self.assertEqual(outputs.sequences.shape, expected_shape) expected_slice = torch.tensor([3400.8005, 4289.2637, 7101.9209], device=torch_device) mean_prediction = outputs.sequences.mean(dim=1) torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
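# A minimal, standalone sketch of the prediction flow exercised by
# `test_seq_to_seq_generation` above. The checkpoint id and the `prepare_batch`
# helper come from this file; the function name below is illustrative only and
# is not collected as a test.
def _example_informer_prediction():
    model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            past_values=batch["past_values"],
            past_time_features=batch["past_time_features"],
            past_observed_mask=batch["past_observed_mask"],
            static_categorical_features=batch["static_categorical_features"],
            future_time_features=batch["future_time_features"],
        )
    # outputs.sequences has shape (batch, num_parallel_samples, prediction_length);
    # the mean over the sample dimension is the point forecast compared in the test above.
    return outputs.sequences.mean(dim=1)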
transformers/tests/models/informer/test_modeling_informer.py/0
{ "file_path": "transformers/tests/models/informer/test_modeling_informer.py", "repo_id": "transformers", "token_count": 10389 }
596
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Janus model.""" import re import tempfile import unittest from functools import reduce import numpy as np import pytest import requests from transformers import ( AutoProcessor, JanusConfig, JanusForConditionalGeneration, JanusModel, JanusVQVAE, JanusVQVAEConfig, is_torch_available, is_vision_available, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import ( Expectations, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image class JanusVisionText2TextModelTester: def __init__( self, parent, image_token_index=0, seq_length=25, initializer_range=0.02, text_config={ "model_type": "llama", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 512, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 1, }, is_training=True, vision_config={ "use_labels": True, "image_size": 20, "patch_size": 5, "num_image_tokens": 16, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_key_value_heads": 1, "num_hidden_layers": 2, "num_attention_heads": 4, "mlp_ratio": 2, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, "vision_feature_select_strategy": "default", "vision_feature_layer": -1, }, use_cache=False, vq_num_embeds=12, vq_embed_dim=12, vq_channel_multiplier=[1, 1], ): self.parent = parent self.initializer_range = initializer_range # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = image_token_index self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.pad_token_id = text_config["pad_token_id"] self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = vision_config["num_channels"] self.image_size = vision_config["image_size"] self.num_image_tokens = vision_config["num_image_tokens"] self.use_cache = use_cache # vq model params self.vq_num_embeds = vq_num_embeds self.vq_embed_dim = vq_embed_dim self.vq_channel_multiplier = 
vq_channel_multiplier def get_vq_config(self): return { "embed_dim": self.vq_embed_dim, "num_embeddings": self.vq_num_embeds, "latent_channels": self.vq_embed_dim, "in_channels": 3, "base_channels": 32, # we have a GroupNorm of 32 groups, so can't do less "channel_multiplier": self.vq_channel_multiplier, "initializer_range": self.initializer_range, "projection_dim": 10, "image_token_embed_dim": 32, # Same as text model hidden size } def get_config(self): return JanusConfig( text_config=self.text_config, vision_config=self.vision_config, vq_config=self.get_vq_config(), image_token_id=self.image_token_index, ) def prepare_config_and_inputs(self): config = self.get_config() pixel_values = floats_tensor( [ self.batch_size, 3, self.image_size, self.image_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(self.pad_token_id).to(torch_device) # set the 16 first tokens to be image, and ensure that no other tokens are image tokens # do not change this unless you modified image size or patch size input_ids[input_ids == self.image_token_index] = self.pad_token_id input_ids[:, : self.num_image_tokens] = self.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids, "generation_mode": "text", # Required to perform text generation instead of image generation. } return config, inputs_dict @require_torch class JanusVisionText2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (JanusModel, JanusForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (JanusForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False _is_composite = True def setUp(self): self.model_tester = JanusVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=JanusConfig, has_text_modality=False) def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) # SigLip has one shared cls attr for all models, so we assign both submodels heer vision_attn = language_attn = "sdpa" if model._supports_sdpa else "eager" if hasattr(model_sdpa, "vision_model") and hasattr(model_sdpa, "language_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == vision_attn) self.assertTrue(model_sdpa.language_model.config._attn_implementation == language_attn) self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.language_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = 
submodule.__class__.__name__ if any(re.finditer(r"Attention(?!Pool)", class_name)): self.assertTrue(submodule.config._attn_implementation == "eager") for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if any(re.finditer(r"Attention(?!Pool)", class_name)): self.assertTrue(submodule.config._attn_implementation == "sdpa") def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") """ We skip some parameters when checking for gradient checkpointing: - VQ model, as its training is not supported. - A few other modules used for image generation. """ skip_patterns = ["vqmodel", "generation_embeddings", "generation_aligner", "generation_head"] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): if ( model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ] or not model_class.supports_gradient_checkpointing ): # TODO (ydshieh): use `skipTest` once pytest-dev/pytest-subtests/pull/169 is merged # self.skipTest(reason=f"`supports_gradient_checkpointing` is False for {model_class.__name__}.") continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # unfreeze additional layers for p in model.parameters(): p.requires_grad_(True) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() if self.test_all_params_have_gradient: for k, v in model.named_parameters(): if v.requires_grad and not reduce(lambda t, s: t | (s in k), skip_patterns, False): self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") else: pass @unittest.skip("There are recompilations in Janus") # TODO (joao, raushan): fix me @pytest.mark.torch_compile_test def test_generate_compile_model_forward_fullgraph(self): pass class JanusVQModelTester: def __init__( self, parent, batch_size=5, is_training=False, initializer_range=0.02, image_size=30, num_embeds=12, base_channels=32, # we have a GroupNorm of 32 groups, so can't do less embed_dim=12, channel_multiplier=[1, 2], patch_size=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.initializer_range = initializer_range self.image_size = image_size self.base_channels = base_channels self.num_embeds = num_embeds self.embed_dim = embed_dim self.channel_multiplier = channel_multiplier self.num_patches = image_size // patch_size def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, 3, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return JanusVQVAEConfig( embed_dim=self.embed_dim, num_embeddings=self.num_embeds, latent_channels=self.embed_dim, in_channels=3, base_channels=self.base_channels, channel_multiplier=self.channel_multiplier, initializer_range=self.initializer_range, resolution=self.image_size, num_patches=self.num_patches, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = 
{"pixel_values": pixel_values} return config, inputs_dict @require_torch class JanusVQModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (JanusVQVAE,) if is_torch_available() else () test_head_masking = False test_pruning = False fx_compatible = False has_attentions = False test_resize_embeddings = False def setUp(self): self.model_tester = JanusVQModelTester(self) self.config_tester = ConfigTester( self, config_class=JanusVQVAEConfig, has_text_modality=False, common_properties=["embed_dim", "num_embeddings"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_cpu_offload(self): pass @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_disk_offload_bin(self): pass @unittest.skip("Janus VQ module cannot offload due to using `self.weight` directly") def test_disk_offload_safetensors(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_hidden_states_output(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_model_outputs_equivalence(self): pass @unittest.skip("Janus VQ module has no get/set embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip("Janus VQ module has no hidden states") def test_retain_grad_hidden_states_attentions(self): pass class JanusIntegrationTest(unittest.TestCase): def setUp(self): self.model_id = "deepseek-community/Janus-Pro-1B" @slow def test_model_text_generation(self): model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto") model.eval() processor = AutoProcessor.from_pretrained(self.model_id) image = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) prompt = "<image_placeholder>\nDescribe what do you see here and tell me about the history behind it?" inputs = processor(images=image, text=prompt, generation_mode="text", return_tensors="pt").to(model.device) output = model.generate(**inputs, max_new_tokens=20, generation_mode="text", do_sample=False) EXPECTED_DECODED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\n\nDescribe what do you see here and tell me about the history behind it?\n\nThe image depicts the constellation of Leo, which is often referred to as the "Lion"' # fmt: skip text = processor.decode(output[0], skip_special_tokens=True) self.assertEqual( text, EXPECTED_DECODED_TEXT, ) @slow def test_model_text_generation_batched(self): model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto") processor = AutoProcessor.from_pretrained(self.model_id) image_1 = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) image_2 = Image.open( requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw ) prompts = [ "<image_placeholder>\nDescribe what do you see here and tell me about the history behind it?", "What constellation is this image showing?<image_placeholder>\n", ] inputs = processor( images=[image_1, image_2], text=prompts, generation_mode="text", padding=True, return_tensors="pt" ).to(model.device, torch.float16) EXPECTED_TEXT_COMPLETION = [ 'You are a helpful language and vision assistant. 
You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\n\nDescribe what do you see here and tell me about the history behind it?\n\nThe image depicts the constellation of Leo, which is often referred to as the "Lion"', "You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nWhat constellation is this image showing?\n\nThe image shows a constellation that is shaped like a stylized figure with a long tail. This", ] generated_ids = model.generate(**inputs, max_new_tokens=20, generation_mode="text", do_sample=False) text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow def test_model_text_generation_with_multi_image(self): model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto") processor = AutoProcessor.from_pretrained(self.model_id) image_1 = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) image_2 = Image.open( requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw ) prompt = "What do these two images <image_placeholder> and <image_placeholder> have in common?" inputs = processor(images=[image_1, image_2], text=prompt, generation_mode="text", return_tensors="pt").to( model.device, torch.float16 ) EXPECTED_TEXT_COMPLETION = ['You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nWhat do these two images and have in common?\n\nThe two images you provided are of the same constellation. The first image shows the constellation of Leo, and the second image shows the constellation of Ursa Major. Both constellations are part of'] # fmt: skip generated_ids = model.generate(**inputs, max_new_tokens=40, do_sample=False) text = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow def test_model_generate_images(self): model = JanusForConditionalGeneration.from_pretrained(self.model_id, device_map="auto") processor = AutoProcessor.from_pretrained(self.model_id) inputs = processor( text=["A portrait of young girl. masterpiece, film grained, best quality."], padding=True, generation_mode="image", return_tensors="pt", ).to(model.device) self.assertTrue(inputs.input_ids.shape[1] == 17) out = model.generate( **inputs, generation_mode="image", do_sample=False, ) # It should run for num_image_tokens in this case 576. 
self.assertTrue(out.shape[1] == 576) # fmt: off expected_tokens = Expectations( { ("rocm", None): [ 10367, 1380, 4841, 15155, 1224, 16361, 15834, 13722, 15258, 8321, 10496, 14532, 8770, 12353, 5481, 11484, 2585, 8587, 3201, 14292, 3356, 2037, 3077, 6107, 3758, 2572, 9376, 13219, 6007, 14292, 12696, 10666, 10046, 13483, 8282, 9101, 5208, 4260, 13886, 13335, 6135, 2316, 15423, 311, 5460, 12218, 14172, 8583, 14577, 3648 ], ("rocm", (9, 5)): [ 4484, 4015, 15750, 506, 3758, 11651, 8597, 5739, 4861, 971, 14985, 14834, 15438, 7548, 1820, 1465, 13529, 12761, 10503, 12761, 14303, 6155, 4015, 11766, 705, 15736, 14146, 10417, 1951, 7713, 14305, 15617, 6169, 2706, 8006, 14893, 3855, 10188, 15652, 6297, 1097, 12108, 15038, 311, 14998, 15165, 897, 4044, 1762, 4676 ], ("cuda", None): [ 4484, 4015, 15750, 506, 3758, 11651, 8597, 5739, 4861, 971, 14985, 14834, 15438, 7548, 1820, 1465, 13529, 12761, 10503, 12761, 14303, 6155, 4015, 11766, 705, 15736, 14146, 10417, 1951, 7713, 14305, 15617, 6169, 2706, 8006, 14893, 3855, 10188, 15652, 6297, 1097, 12108, 15038, 311, 14998, 15165, 897, 4044, 1762, 4676 ], } ) expected_tokens = torch.tensor(expected_tokens.get_expectation()).to(model.device) # fmt: on # Compare the first 50 generated tokens. self.assertTrue(torch.allclose(expected_tokens, out[0][:50])) # Decode generated tokens to pixel values and postprocess them. decoded_pixel_values = model.decode_image_tokens(out) images = processor.postprocess(list(decoded_pixel_values.float()), return_tensors="np") self.assertTrue(images["pixel_values"].shape == (1, 384, 384, 3)) self.assertTrue(isinstance(images["pixel_values"], np.ndarray))
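# A minimal, standalone sketch of the text-generation path exercised by
# `JanusIntegrationTest.test_model_text_generation` above. The checkpoint id, the
# `<image_placeholder>` token and `generation_mode="text"` come from that test; the
# function name and the reuse of the same example image URL are illustrative only.
def _example_janus_text_generation():
    model = JanusForConditionalGeneration.from_pretrained("deepseek-community/Janus-Pro-1B", device_map="auto")
    processor = AutoProcessor.from_pretrained("deepseek-community/Janus-Pro-1B")
    image = Image.open(
        requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw
    )
    prompt = "<image_placeholder>\nDescribe what do you see here and tell me about the history behind it?"
    inputs = processor(images=image, text=prompt, generation_mode="text", return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=20, generation_mode="text", do_sample=False)
    # Decoding with skip_special_tokens strips the expanded image placeholder tokens from the text.
    return processor.decode(output[0], skip_special_tokens=True)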
transformers/tests/models/janus/test_modeling_janus.py/0
{ "file_path": "transformers/tests/models/janus/test_modeling_janus.py", "repo_id": "transformers", "token_count": 10272 }
597
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import AutoProcessor, Llama4Processor, PreTrainedTokenizerFast from transformers.testing_utils import require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import Llama4ImageProcessorFast @require_vision class Llama4ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Llama4Processor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = Llama4ImageProcessorFast(max_patches=1, size={"height": 20, "width": 20}) tokenizer = PreTrainedTokenizerFast.from_pretrained("unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit") processor_kwargs = cls.prepare_processor_dict() processor = Llama4Processor(image_processor, tokenizer, **processor_kwargs) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.image_token def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname)
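# A minimal, standalone sketch of the processor round-trip that `setUpClass` above relies
# on: compose a Llava4-style processor from a fast image processor and a tokenizer, save
# it, and reload it via AutoProcessor. Component classes and the tokenizer checkpoint come
# from the test; the extra processor kwargs used there are omitted here for brevity.
def _example_llama4_processor_roundtrip():
    image_processor = Llama4ImageProcessorFast(max_patches=1, size={"height": 20, "width": 20})
    tokenizer = PreTrainedTokenizerFast.from_pretrained(
        "unsloth/Llama-3.2-11B-Vision-Instruct-unsloth-bnb-4bit"
    )
    processor = Llama4Processor(image_processor, tokenizer)
    with tempfile.TemporaryDirectory() as tmpdirname:
        processor.save_pretrained(tmpdirname)
        reloaded = AutoProcessor.from_pretrained(tmpdirname)
    # The reloaded object exposes the same image_processor / tokenizer sub-components.
    return reloaded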
transformers/tests/models/llama4/test_processing_llama4.py/0
{ "file_path": "transformers/tests/models/llama4/test_processing_llama4.py", "repo_id": "transformers", "token_count": 657 }
598
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Llava-NeXT model.""" import unittest import numpy as np import requests from huggingface_hub import hf_hub_download from parameterized import parameterized from transformers import ( AutoProcessor, LlavaOnevisionConfig, LlavaOnevisionForConditionalGeneration, LlavaOnevisionModel, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( Expectations, cleanup, require_bitsandbytes, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class LlavaOnevisionVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=1, video_token_index=2, projector_hidden_act="gelu", seq_length=7, vision_feature_select_strategy="full", vision_feature_layer=-1, text_config={ "model_type": "qwen2", "seq_length": 7, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 580, "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 0, }, is_training=True, vision_config={ "image_size": 16, "patch_size": 8, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.pad_token_id = text_config["pad_token_id"] self.num_image_tokens = 10 self.seq_length = seq_length + self.num_image_tokens self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = 3 self.image_size = 30 self.image_grid_pinpoints = [[16, 16]] def get_config(self): return LlavaOnevisionConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, video_token_index=self.video_token_index, 
projector_hidden_act=self.projector_hidden_act, vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, image_grid_pinpoints=self.image_grid_pinpoints, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor( [ self.batch_size, 3, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) input_ids[input_ids == config.image_token_index] = self.pad_token_id input_ids[:, : self.num_image_tokens] = config.image_token_index labels = torch.zeros((self.batch_size, self.seq_length), dtype=torch.long, device=torch_device) labels[:, : self.num_image_tokens] == self.ignore_index inputs_dict = { "pixel_values": pixel_values, "image_sizes": torch.tensor([[45, 45]] * self.batch_size), "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, } return config, inputs_dict @require_torch class LlavaOnevisionForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `LlavaOnevisionForConditionalGeneration`. """ all_model_classes = ( ( LlavaOnevisionModel, LlavaOnevisionForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-text-to-text": LlavaOnevisionForConditionalGeneration} if is_torch_available() else {} ) test_pruning = False test_head_masking = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = LlavaOnevisionVisionText2TextModelTester(self) common_properties = ["image_token_index", "video_token_index", "vision_feature_layer"] self.config_tester = ConfigTester( self, config_class=LlavaOnevisionConfig, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): # LLaVa Onevision has SigLIP backbone which init weights differently from CLIP if "image_newline" in name or "vision_tower" in name: continue elif param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_odd_sized_image(self): # prepare model configuration config = self.model_tester.get_config() # prepare input num_image_tokens = 10 pixel_values = floats_tensor([1, 2, 3, config.vision_config.image_size, config.vision_config.image_size]) input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2 input_ids[:, :num_image_tokens] = config.image_token_index attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) 
inputs_dict = { "pixel_values": pixel_values, "image_sizes": torch.tensor([[13, 16]]), # odd-sized image "input_ids": input_ids, "attention_mask": attention_mask, } # forward with odd-sized image input for model_class in self.all_model_classes: model = model_class(config).to(torch_device) model(**inputs_dict) @parameterized.expand( [ (-1,), ([-1],), ([-1, -2],), ], ) def test_vision_feature_layers(self, vision_feature_layer): """ Test that we can use either one vision feature layer, or a list of vision feature layers. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.vision_feature_layer = vision_feature_layer num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer) hidden_size = config.vision_config.hidden_size expected_features = hidden_size * num_feature_layers for model_class in self.all_model_classes: model = model_class(config).to(torch_device) # We should have the right number of input features, # and should be able to run a forward pass without exploding base_model = getattr(model, "model", model) assert base_model.multi_modal_projector.linear_1.in_features == expected_features model(**input_dict) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( "VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. 
Can be tested as part of LLM test" ) def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): pass @require_torch class LlavaOnevisionForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", padding_side="left" ) image_file = hf_hub_download( repo_id="raushan-testing-hf/images_test", filename="llava_v1_5_radar.jpg", repo_type="dataset" ) video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) self.image = Image.open(image_file) self.video = np.load(video_file) self.prompt_image = "user\n<image>\nWhat do you see in this image?<|im_end|>\n<|im_start|>assistant\n" self.prompt_video = "user\n<video>\nWhat do you see in this video?<|im_end|>\n<|im_start|>assistant\n" def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_bitsandbytes def test_small_model_integration_test(self): model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device ) inputs = self.processor(images=self.image, text=self.prompt_image, return_tensors="pt").to( torch_device, torch.float16 ) self.assertTrue(inputs.input_ids.shape[1] == 6567) # should expand num-image-tokens times self.assertTrue(inputs.pixel_values.shape == torch.Size([1, 10, 3, 384, 384])) self.assertTrue(inputs.image_sizes.tolist() == [[899, 1024]]) # verify single forward pass inputs = inputs.to(torch_device) # verify generation output = model.generate(**inputs, max_new_tokens=100) EXPECTED_DECODED_TEXTS = Expectations( { ("xpu", 3): 'user\n\nWhat do you see in this image?\nassistant\nThe image is a radar chart that compares the performance of different models in a specific task, likely related to natural language processing or machine learning. The chart is divided into several axes, each representing a different model or method. The models are color-coded and labeled with their respective names. The axes are labeled with terms such as "VQA," "GQA," "MQA," "VQAv2," "MM-Vet," "LLaVA-Bench," "LLaVA-1', ("cuda", 7): 'user\n\nWhat do you see in this image?\nassistant\nThe image is a radar chart that compares the performance of different models in a specific task, likely related to natural language processing or machine learning. The chart is divided into several axes, each representing a different model or method. The models are color-coded and labeled with their respective names. The axes are labeled with terms such as "VQA," "GQA," "MQA," "VQAv2," "MM-Vet," "LLaVA-Bench," "LLaVA-1', ("cuda", 8): 'user\n\nWhat do you see in this image?\nassistant\nThe image is a radar chart that compares the performance of different models in a specific task, likely related to natural language processing or machine learning. The chart is divided into several axes, each representing a different model or method. The models are color-coded and labeled with their respective names. The axes are labeled with terms such as "VQA," "GQA," "MQA," "VIZ," "TextVQA," "SQA-IMG," and "MQE." 
The radar chart shows', } ) # fmt: skip EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() DECODED_TEXT = self.processor.decode(output[0], skip_special_tokens=True) self.assertEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_batch(self): model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device ) inputs = self.processor( text=[self.prompt_image, self.prompt_video], images=self.image, videos=self.video, return_tensors="pt", padding=True, ).to(torch_device, torch.float16) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = ['user\n\nWhat do you see in this image?\nassistant\nThe image is a radar chart that compares the performance of different models in a specific task, likely related', 'user\n\nWhat do you see in this video?\nassistant\nA child wearing a light blue sleeveless top and pink pants is seen sitting on a bed, eng'] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_video(self): # related to (#29835) model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device, ) inputs = self.processor(text=self.prompt_video, videos=self.video, return_tensors="pt").to( torch_device, torch.float16 ) # verify generation output = model.generate(**inputs, max_new_tokens=40) EXPECTED_DECODED_TEXT = 'user\n\nWhat do you see in this video?\nassistant\nA child wearing a light blue sleeveless top and pink pants is seen sitting on a bed, engrossed in reading a book.' # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_multi_image(self): # related to (#29835) model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device, ) url = "https://www.ilankelman.org/stopsigns/australia.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = ( "user\n<image><image>\nWhat is the difference between these images?<|im_end|>\n<|im_start|>assistant\n" ) inputs = self.processor(text=prompt, images=[self.image, image], return_tensors="pt").to( torch_device, torch.float16 ) # verify generation output = model.generate(**inputs, max_new_tokens=40) EXPECTED_DECODED_TEXT = "user\n\nWhat is the difference between these images?\nassistant\nThe images you've provided appear to be related to a graphical representation of a radar chart, which is a type of data visualization used to show the distribution of a particular variable across a geographic area. 
The" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_multi_image_nested(self): # related to (#34585) model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device, ) url = "https://www.ilankelman.org/stopsigns/australia.jpg" image = Image.open(requests.get(url, stream=True).raw) prompts = [ "user\nTell me about the french revolution.<|im_end|>\n<|im_start|>assistant\n", # text-only case "user\n<image><image>\nWhat is the difference between these images?<|im_end|>\n<|im_start|>assistant\n", self.prompt_image, ] images_nested = [[], [image, self.image], [self.image]] inputs = self.processor( text=prompts, images=images_nested, return_tensors="pt", padding=True, ).to(torch_device, torch.float16) # verify generation output = model.generate(**inputs, max_new_tokens=40) EXPECTED_DECODED_TEXT = ["user\nTell me about the french revolution.\nassistant\nThe French Revolution! A pivotal event in modern history that had a profound impact on the course of Western civilization. Here's a brief overview:\n\n**Background**\n\nIn the late 18th century,", "user\n\nWhat is the difference between these images?\nassistant\nThe first image shows a stop sign with a traditional Chinese architectural background, while the second image displays a radar chart with various algorithms and models, including BLIP-2, InstructBLIP, Q", "user\n\nWhat do you see in this image?\nassistant\nThe image is a radar chart that compares the performance of different models in a specific task, likely related to natural language processing or machine learning. The chart is divided into several axes, each representing a different"] # fmt: skip DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True) self.assertListEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_multi_video(self): # related to (#29835) model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device, ) prompt = "user\n<video><video>\nAre these videos identical?<|im_end|>\n<|im_start|>assistant\n" inputs = self.processor(text=prompt, videos=[self.video, self.video], return_tensors="pt").to( torch_device, torch.float16 ) # verify generation output = model.generate(**inputs, max_new_tokens=40) EXPECTED_DECODED_TEXT = "user\n\nAre these videos identical?\nassistant\nNo, the video is not identical; it shows slight variations in the child's actions and the background." 
# fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch_different_resolutions(self): model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e" cats_image = Image.open(requests.get(url, stream=True).raw) lowres_img = Image.open(requests.get(lowres_url, stream=True).raw) inputs = self.processor( text=[self.prompt_image, self.prompt_image], images=[lowres_img, cats_image], return_tensors="pt", padding=True, ).to(torch_device, torch.float16) # verify generation output = model.generate(**inputs, max_new_tokens=50) EXPECTED_DECODED_TEXT = [ 'user\n\nWhat do you see in this image?\nassistant\nThe image shows a scene of two deer in a grassy area with trees in the background. The weather appears to be foggy, giving the scene a misty and somewhat mysterious atmosphere. The deer are standing close to each other, possibly grazing or', 'user\n\nWhat do you see in this image?\nassistant\nIn the tranquil setting of this image, two cats are enjoying a peaceful nap on a vibrant pink blanket. The cat on the left, with its gray and black striped fur, is lying on its side, its head comfortably resting on the blanket. Its', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_batch_matches_single(self): model = LlavaOnevisionForConditionalGeneration.from_pretrained( "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device, ) url = "http://images.cocodataset.org/val2017/000000039769.jpg" lowres_url = "https://4.img-dpreview.com/files/p/TS560x560~forums/56876524/03975b28741443319e9a94615e35667e" cats_image = Image.open(requests.get(url, stream=True).raw) lowres_img = Image.open(requests.get(lowres_url, stream=True).raw) inputs_batched = self.processor( text=[self.prompt_image, self.prompt_image], images=[lowres_img, cats_image], return_tensors="pt", padding=True, ).to(torch_device, torch.float16) inputs_single = self.processor( text=self.prompt_image, images=lowres_img, return_tensors="pt", padding=True ).to(torch_device, torch.float16) # verify generation output_batched = model.generate(**inputs_batched, max_new_tokens=50) output_single = model.generate(**inputs_single, max_new_tokens=50) self.assertEqual( self.processor.decode(output_batched[0], skip_special_tokens=True), self.processor.decode(output_single[0], skip_special_tokens=True), )
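# A minimal, standalone sketch of the batched image+video generation path exercised by
# `test_small_model_integration_test_batch` above. The checkpoint id, prompt templates and
# test assets come from this file; dtype and token budget mirror the test and the function
# name is illustrative only.
def _example_llava_onevision_batched_generation():
    processor = AutoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf", padding_side="left")
    model = LlavaOnevisionForConditionalGeneration.from_pretrained(
        "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", dtype="float16", device_map=torch_device
    )
    image = Image.open(
        hf_hub_download(repo_id="raushan-testing-hf/images_test", filename="llava_v1_5_radar.jpg", repo_type="dataset")
    )
    video = np.load(
        hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset")
    )
    prompts = [
        "user\n<image>\nWhat do you see in this image?<|im_end|>\n<|im_start|>assistant\n",
        "user\n<video>\nWhat do you see in this video?<|im_end|>\n<|im_start|>assistant\n",
    ]
    inputs = processor(
        text=prompts, images=image, videos=video, return_tensors="pt", padding=True
    ).to(torch_device, torch.float16)
    output = model.generate(**inputs, max_new_tokens=20)
    # One decoded string per prompt; padding_side="left" keeps generation aligned across the batch.
    return processor.batch_decode(output, skip_special_tokens=True)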
transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py/0
{ "file_path": "transformers/tests/models/llava_onevision/test_modeling_llava_onevision.py", "repo_id": "transformers", "token_count": 10513 }
599
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MGP-STR model.""" import unittest import requests from transformers import MgpstrConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MgpstrForSceneTextRecognition, MgpstrModel if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor class MgpstrModelTester: def __init__( self, parent, is_training=False, batch_size=13, image_size=(32, 128), patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=99, num_wordpiece_labels=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, mlp_ratio=4.0, patch_embeds_hidden_size=257, output_hidden_states=None, ): self.parent = parent self.is_training = is_training self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.patch_embeds_hidden_size = patch_embeds_hidden_size self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) config = self.get_config() return config, pixel_values def get_config(self): return MgpstrConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, max_token_length=self.max_token_length, num_character_labels=self.num_character_labels, num_bpe_labels=self.num_bpe_labels, num_wordpiece_labels=self.num_wordpiece_labels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, mlp_ratio=self.mlp_ratio, output_hidden_states=self.output_hidden_states, ) def create_and_check_model(self, config, pixel_values): model = MgpstrForSceneTextRecognition(config) model.to(torch_device) model.eval() with torch.no_grad(): generated_ids = model(pixel_values) self.parent.assertEqual( generated_ids[0][0].shape, (self.batch_size, self.max_token_length, self.num_character_labels) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MgpstrModelTest(ModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase): all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": MgpstrForSceneTextRecognition, "image-feature-extraction": MgpstrModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False def setUp(self): self.model_tester = MgpstrModelTester(self) self.config_tester = ConfigTester(self, config_class=MgpstrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batching_equivalence(self, atol=1e-4, rtol=1e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="MgpstrModel does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @unittest.skip(reason="MgpstrModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.patch_embeds_hidden_size, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override as the `logit_scale` parameter initialization is different for MgpstrModel def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if isinstance(param, (nn.Linear, nn.Conv2d, nn.LayerNorm)): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="Retain_grad is tested in individual model tests") def 
test_retain_grad_hidden_states_attentions(self):
        pass


# We will verify our results on an image from the IIIT-5k dataset
def prepare_img():
    url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@require_vision
@require_torch
class MgpstrModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model_name = "alibaba-damo/mgp-str-base"
        model = MgpstrForSceneTextRecognition.from_pretrained(model_name).to(torch_device)
        processor = MgpstrProcessor.from_pretrained(model_name)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs)

        # verify the logits
        self.assertEqual(outputs.logits[0].shape, torch.Size((1, 27, 38)))

        out_strs = processor.batch_decode(outputs.logits)
        expected_text = "ticket"
        self.assertEqual(out_strs["generated_text"][0], expected_text)

        expected_slice = torch.tensor(
            [[[-39.5397, -44.4024, -36.1844], [-61.4709, -63.8639, -58.3454], [-74.0225, -68.5494, -71.2164]]],
            device=torch_device,
        )

        torch.testing.assert_close(outputs.logits[0][:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/mgp_str/test_modeling_mgp_str.py/0
{ "file_path": "transformers/tests/models/mgp_str/test_modeling_mgp_str.py", "repo_id": "transformers", "token_count": 4317 }
600
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MobileNetV2 model.""" import unittest from transformers import MobileNetV2Config from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model if is_vision_available(): from PIL import Image from transformers import MobileNetV2ImageProcessor class MobileNetV2ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV2ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6", last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.depth_divisible_by = depth_divisible_by self.min_depth = min_depth self.expand_ratio = expand_ratio self.tf_padding = tf_padding self.output_stride = output_stride self.first_layer_is_expansion = first_layer_is_expansion self.finegrained_output = finegrained_output self.hidden_act = hidden_act self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier) self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV2Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, 
first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) self.parent.assertEqual( result.pooler_output.shape, (self.batch_size, self.last_hidden_size), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV2ForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) result = model(pixel_values, labels=pixel_labels) self.parent.assertEqual( result.logits.shape, ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = ( (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation) if is_torch_available() else () ) pipeline_model_mapping = ( { "image-feature-extraction": MobileNetV2Model, "image-classification": MobileNetV2ForImageClassification, "image-segmentation": MobileNetV2ForSemanticSegmentation, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_torch_exportable = True def setUp(self): self.model_tester = MobileNetV2ModelTester(self) self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV2 does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="MobileNetV2 does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 16 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/mobilenet_v2_1.4_224" model = MobileNetV2Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expectations = Expectations( { (None, None): [0.2445, -1.1993, 0.1905], ("cuda", 
8): [0.2445, -1.1993, 0.1905], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4) @slow def test_inference_semantic_segmentation(self): model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") model = model.to(torch_device) image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513") image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits = outputs.logits # verify the logits expected_shape = torch.Size((1, 21, 65, 65)) self.assertEqual(logits.shape, expected_shape) expectations = Expectations( { (None, None): [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ], ("cuda", 8): [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3742], [-2.4226, -2.3028, -2.6836], [-2.7820, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9645, 4.8734]], ], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py/0
{ "file_path": "transformers/tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py", "repo_id": "transformers", "token_count": 5906 }
601
# Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class MPNetModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_large_model_config(self): return MPNetConfig.from_pretrained("microsoft/mpnet-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_mpnet_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mpnet_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mpnet_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mpnet_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MPNetForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_mpnet_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) 
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)

    @unittest.skip(reason="TFMPNet adds poolers to all models, unlike the PT model class.")
    def test_tf_from_pt_safetensors(self):
        return


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/mpnet/test_modeling_mpnet.py/0
{ "file_path": "transformers/tests/models/mpnet/test_modeling_mpnet.py", "repo_id": "transformers", "token_count": 4627 }
602
# Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MVP model.""" import copy import tempfile import unittest import timeout_decorator # noqa from transformers import MvpConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpTokenizer, ) from transformers.models.mvp.modeling_mvp import MvpDecoder, MvpEncoder, shift_tokens_right def prepare_mvp_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MvpModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def 
prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_mvp_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MvpConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MvpModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MvpModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MvpEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = 
model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MvpDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MvpHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() labels = _long_tensor([2] * batch_size).to(torch_device) config.num_labels = 3 model = MvpForSequenceClassification(config) model.to(torch_device) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels) expected_shape = torch.Size((batch_size, config.num_labels)) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() sequence_labels = ids_tensor([batch_size], 2).to(torch_device) model = MvpForQuestionAnswering(config) model.to(torch_device) outputs = model( input_ids=input_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) self.assertIsInstance(outputs["loss"].item(), float) @timeout_decorator.timeout(1) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device) lm_model = MvpForConditionalGeneration(config) lm_model.to(torch_device) outputs = lm_model(input_ids=input_ids, labels=lm_labels) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_lm_uneven_forward(self): config = MvpConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, 
config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long) config = MvpConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) lm_model = MvpForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 generated_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @slow def test_tokenization(self): tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp") examples = [" Hello world", " DomDramg"] # need leading spaces for equality fairseq_results = [ torch.tensor([0, 20920, 232, 2]), torch.tensor([0, 11349, 495, 4040, 571, 2]), ] for ex, desired_result in zip(examples, fairseq_results): mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = MvpForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_resize_tokens_embeddings_more(self): config, input_ids, _ = self._get_config_and_data() def _get_embs(m): return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone()) model = MvpForConditionalGeneration(config).eval().to(torch_device) input, output = _get_embs(model) self.assertTrue(torch.eq(input, output).all()) new_vocab_size = 45 model.resize_token_embeddings(new_vocab_size) input_new, output_new = _get_embs(model) self.assertEqual(input_new.shape, (new_vocab_size, config.d_model)) self.assertEqual(output_new.shape, (new_vocab_size, config.d_model)) self.assertTrue(torch.eq(input_new, output_new).all()) @require_torch class MvpModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (MvpModel, MvpForConditionalGeneration, MvpForSequenceClassification, MvpForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MvpModel, "fill-mask": MvpForConditionalGeneration, "question-answering": MvpForQuestionAnswering, "summarization": MvpForConditionalGeneration, "text-classification": MvpForSequenceClassification, "text-generation": MvpForCausalLM, "text2text-generation": MvpForConditionalGeneration, "translation": MvpForConditionalGeneration, "zero-shot": 
MvpForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = MvpModelTester(self) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # MvpForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MvpModel, MvpForConditionalGeneration, MvpForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MvpForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." 
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @require_sentencepiece @require_tokenizers class MvpModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return MvpTokenizer.from_pretrained("RUCAIBox/mvp") @slow def test_inference_no_head(self): model = MvpModel.from_pretrained("RUCAIBox/mvp").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = input_ids.ne(model.config.pad_token_id) with torch.no_grad(): output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[0.3461, 0.3624, 0.2689], [0.3461, 0.3624, 0.2689], [-0.1562, 1.1637, -0.3784]], device=torch_device ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-3, atol=1e-3) @slow def test_summarization_inference(self): model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp").to(torch_device) tok = self.default_tokenizer PGE_ARTICLE = """ Listen to local radio broadcasts for advertisements that reference casinos in your area.\nIf none are in your area, listen to national radio broadcasts for advertisements of casinos in other areas.\nNote the location that is mentioned in each advertisement that involves a casino.\nIf no locations are mentioned, note any additional contact information, such as a website or phone number. Use that information to find out where the casinos are.;\n,\n\nIf you learn about more than 1 casino on the radio, use the Internet to search the distance between your location and each casino. Sites such as maps.google.com or mapquest.com will help you in this search.'""" # fmt: skip EXPECTED_SUMMARY = "Listen to the radio.\nUse the Internet." 
dct = tok.batch_encode_plus( [PGE_ARTICLE], return_tensors="pt", ).to(torch_device) hypotheses_batch = model.generate(**dct) decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True) self.assertEqual(EXPECTED_SUMMARY, decoded[0]) class MvpStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MvpConfig( vocab_size=self.vocab_size, d_model=self.d_model, encoder_layers=self.decoder_layers, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MvpDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) 
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MvpDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model( next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True )["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MvpDecoder, MvpForCausalLM) if is_torch_available() else () fx_comptatible = True test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MvpStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MvpConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_retain_grad_hidden_states_attentions(self):
        return
transformers/tests/models/mvp/test_modeling_mvp.py/0
{ "file_path": "transformers/tests/models/mvp/test_modeling_mvp.py", "repo_id": "transformers", "token_count": 15197 }
603
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OLMo model.""" import unittest import pytest from packaging import version from parameterized import parameterized from transformers import OlmoConfig, is_torch_available, set_seed from transformers.generation.configuration_utils import GenerationConfig from transformers.models.auto.tokenization_auto import AutoTokenizer from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from transformers.testing_utils import ( require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OlmoForCausalLM, OlmoModel, ) class OlmoModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="silu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return OlmoConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = OlmoModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class OlmoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OlmoModel, OlmoForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": OlmoModel, "text-generation": OlmoForCausalLM, } if is_torch_available() else {} ) test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = OlmoModelTester(self) self.config_tester = ConfigTester(self, config_class=OlmoConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OLMo does not support head pruning.") def test_headmasking(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = OlmoModel(config) original_model.to(torch_device) original_model.eval() original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = OlmoModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = 
scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class OlmoIntegrationTest(unittest.TestCase): @slow def test_model_1b_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = OlmoForCausalLM.from_pretrained("allenai/OLMo-1B-hf", device_map="auto") out = model(torch.tensor(input_ids)).logits.float() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[2.2869, 0.3315, 0.9876, 1.4146, 1.8804, 2.0430, 1.7055, 1.2065]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([2.5551, -1.1230, 11.0510, 12.4977, 7.9651, 7.2342, 6.1885, 7.8340, 9.9847, 12.6695, 12.2345, 10.7970, 8.4749, 14.2483, 12.9588, 13.9233, 11.0496, 5.5749, 7.4466, 7.7914, 6.8440, 5.8951, 4.8180, 4.1935, 4.5216, 4.7256, 3.9553, 12.2870, 12.4990, 8.1591]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf", device_map="auto") out = model(torch.tensor(input_ids)).logits.float() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[0.0271, 0.0249, -0.0578, -0.0870, 0.0167, 0.0710, 0.1002, 0.0677]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([-1.7433, -1.6685, 7.4941, 6.1506, 0.1364, -0.1127, 1.3224, 4.5458, 4.2068, 5.8296, 7.4723, 2.7925, 3.1245, 10.8872, 10.0758, 10.6717, 7.0945, 1.2398, 3.6766, 4.2365, 2.5655, 2.2222, 1.7418, 0.5223, 0.7753, 1.0938, 0.6723, 6.2522, 6.2264, 1.8105]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_twin_2t_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-Twin-2T-hf", device_map="auto") out = model(torch.tensor(input_ids)).logits.float() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-0.3636, -0.3825, -0.4800, -0.3696, -0.8388, -0.9737, -0.9849, -0.8356]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([-2.0833, -1.9234, 8.7312, 7.8049, 1.0372, 0.8941, 3.1548, 1.8502, 5.5511, 5.5793, 8.1166, 4.5906, 1.8691, 11.6377, 8.9858, 11.6447, 7.4549, 1.4725, 2.8399, 2.7568, 1.4011, 1.6958, 0.5572, 0.5231, 0.3068, 0.5364, 0.6769, 7.9636, 8.2379, 1.7950]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that \nthe speed of light is the same for all observers.\n\nThe theory of relativity is a theory of physics that describes the \nmovement of objects in space and time.\n\nThe theory of relativity is a theory of physics that describes the \nmovement of objects in space and 
time.\n\n""" prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-7B-hf", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt") model = OlmoForCausalLM.from_pretrained("allenai/OLMo-7B-hf", device_map="auto") # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @require_tokenizers def test_fast_special_tokens(self): fast_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMo-1B-hf") original_add_eos_token = fast_tokenizer.add_eos_token fast_tokenizer.add_eos_token = False fast = fast_tokenizer.encode("A sample test") self.assertEqual(fast, [34, 3410, 1071]) fast_tokenizer.add_eos_token = True fast = fast_tokenizer.encode("A sample test") self.assertEqual(fast, [34, 3410, 1071, 50279]) fast_tokenizer.add_eos_token = original_add_eos_token @require_tokenizers def test_simple_encode_decode(self): rust_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMo-1B-hf") self.assertEqual(rust_tokenizer.encode("This is a test"), [1552, 310, 247, 1071]) self.assertEqual(rust_tokenizer.decode([1552, 310, 247, 1071], skip_special_tokens=True), "This is a test") # bytefallback showcase self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [20025, 46549, 5225, 48561, 33656, 238, 12105]) # fmt: skip self.assertEqual( rust_tokenizer.decode([20025, 46549, 5225, 48561, 33656, 238, 12105], skip_special_tokens=True), "生活的真谛是", ) # Inner spaces showcase self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50276, 12092]) self.assertEqual(rust_tokenizer.decode([12764, 50276, 12092], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50275, 12092]) self.assertEqual(rust_tokenizer.decode([12764, 50275, 12092], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.encode(""), []) self.assertEqual(rust_tokenizer.encode(" "), [209]) self.assertEqual(rust_tokenizer.encode(" "), [50276]) self.assertEqual(rust_tokenizer.encode(" Hello"), [24387]) @pytest.mark.torch_export_test @slow def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) olmo_model = "allenai/OLMo-1B-hf" tokenizer = AutoTokenizer.from_pretrained(olmo_model, pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETION = [ "Simply put, the theory of relativity states that \nthe speed of light is the same in all reference frames.\n\nThe speed of light", ] max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 model = OlmoForCausalLM.from_pretrained( olmo_model, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompts = ["Simply put, the theory of relativity states that "] prompt_tokens = 
tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + eager eager_generated_ids = model.generate( **prompt_tokens, max_new_tokens=max_new_tokens, do_sample=False, cache_implementation=cache_implementation ) eager_generated_text = tokenizer.batch_decode(eager_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, eager_generated_text) # Static Cache + export from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
transformers/tests/models/olmo/test_modeling_olmo.py/0
{ "file_path": "transformers/tests/models/olmo/test_modeling_olmo.py", "repo_id": "transformers", "token_count": 7995 }
604
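# Editor's note (not part of the dataset row above or below): the OLMo rope-scaling test
# relies on linear scaling changing the embeddings for every input length, while dynamic
# scaling leaves them untouched until the input exceeds the trained maximum. A minimal,
# self-contained sketch of that behavior -- the "dynamic" branch below is an illustrative
# placeholder, not the actual transformers NTK formula, and `max_trained` is an assumed name.
import torch


def rope_angles(seq_len, dim=64, base=10000.0, scaling_type=None, factor=10.0, max_trained=512):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float()
    if scaling_type == "linear":
        positions = positions / factor  # rescales short and long inputs alike
    elif scaling_type == "dynamic" and seq_len > max_trained:
        # placeholder base adjustment; only triggered past the trained maximum length
        inv_freq = 1.0 / ((base * factor) ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions, inv_freq)


short_plain = rope_angles(10)
assert torch.equal(short_plain, rope_angles(10, scaling_type="dynamic"))  # unchanged for short inputs
assert not torch.equal(short_plain, rope_angles(10, scaling_type="linear"))  # already different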
# Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OPT model.""" import copy import tempfile import unittest import timeout_decorator # noqa from transformers import OPTConfig, is_torch_available from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPT2Tokenizer, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, ) def prepare_opt_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) return { "input_ids": input_ids, "attention_mask": attention_mask, } class OPTModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_opt_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return OPTConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, 
max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = OPTModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) @require_torch class OPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (OPTModel, OPTForCausalLM, OPTForSequenceClassification, OPTForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": OPTModel, "question-answering": OPTForQuestionAnswering, "text-classification": OPTForSequenceClassification, "text-generation": OPTForCausalLM, "zero-shot": OPTForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = False fx_compatible = False # Broken by attention refactor cc @Cyrilvallez test_pruning = False test_missing_keys = False test_head_masking = False # new attn API doesn't support head mask # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name 
== "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = OPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OPTConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (OPTModel,): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = OPTForCausalLM(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_opt_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = OPTForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_opt_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = OPTForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) 
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch class OPTModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids=input_ids).last_hidden_state expected_shape = torch.Size((1, 11, 512)) self.assertEqual(output.shape, expected_shape) # expected value works for CPU, as well as GPU (with TF32 disabled) expected_slice = torch.tensor( [ [-0.28726277, -1.9241608, -0.3058734], [-1.2737825, -0.13332152, -0.18766522], [0.41159445, 0.1191957, -1.3107123], ], device=torch_device, ) assert_tensors_close(output[0, :3, :3], expected_slice, atol=5e-5) @require_torch @slow class OPTEmbeddingsTest(unittest.TestCase): def setUp(self): super().setUp() self.path_model = "facebook/opt-350m" def test_load_model(self): try: _ = OPTForCausalLM.from_pretrained(self.path_model) except BaseException: self.fail("Failed loading model") def test_logits(self): model = OPTForCausalLM.from_pretrained(self.path_model) model = model.eval() tokenizer = GPT2Tokenizer.from_pretrained(self.path_model) prompts = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False inputs = tokenizer(prompts, return_tensors="pt", padding=True, add_special_tokens=False) logits = model(inputs.input_ids, attention_mask=inputs.attention_mask)[0].mean(dim=-1) logits_meta = torch.Tensor( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) assert torch.allclose(logits, logits_meta, atol=1e-4) @slow class OPTGenerationTest(unittest.TestCase): @property def prompts(self): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def test_generation_pre_attn_layer_norm(self): model_id = "facebook/opt-125m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = OPTForCausalLM.from_pretrained(model_id) for prompt in 
self.prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) def test_batch_generation(self): model_id = "facebook/opt-350m" tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = OPTForCausalLM.from_pretrained(model_id) model.to(torch_device) tokenizer.padding_side = "left" # use different length sentences to test batching sentences = [ "Hello, my dog is a little", "Today, I", ] inputs = tokenizer(sentences, return_tensors="pt", padding=True) input_ids = inputs["input_ids"].to(torch_device) outputs = model.generate( input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), ) inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device) output_non_padded = model.generate(input_ids=inputs_non_padded) num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item() inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device) output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) expected_output_sentence = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(expected_output_sentence, batch_out_sentence) self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence]) def test_generation_post_attn_layer_norm(self): model_id = "facebook/opt-350m" EXPECTED_OUTPUTS = [ "Today is a beautiful day and I want to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] predicted_outputs = [] tokenizer = GPT2Tokenizer.from_pretrained(model_id) model = OPTForCausalLM.from_pretrained(model_id) for prompt in self.prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=10) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) @require_torch_accelerator @require_torch_fp16 def test_batched_nan_fp16(self): # a bug manifested starting at models facebook/opt-1.3 and larger when running batched generations, # therefore not using a tiny model, but the smallest model the problem was seen with which is opt-1.3b. 
# please refer to this github thread: https://github.com/huggingface/transformers/pull/17437 for more details model_name = "facebook/opt-1.3b" tokenizer = GPT2Tokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") model = OPTForCausalLM.from_pretrained(model_name, dtype=torch.float16, use_cache=True).to(torch_device) model = model.eval() batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt") input_ids = batch["input_ids"].to(torch_device) attention_mask = batch["attention_mask"].to(torch_device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask) self.assertFalse( torch.isnan(outputs.logits[0]).any().item() ) # the first logits could contain NaNs if it fails @slow def test_contrastive_search_opt(self): article = ( "A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the " "Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived " "there?" ) opt_tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-1.3b") opt_model = OPTForCausalLM.from_pretrained("facebook/opt-1.3b").to(torch_device) input_ids = opt_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = opt_model.generate(input_ids, penalty_alpha=0.6, top_k=5, max_length=256) generated_text = opt_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I " "am the Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have " "you lived there?\nStatue: A hundred years.\nHuman: And you’re from what country?\nStatue: The United " "States of America.\nHuman: Why did you come to America?\nStatue: I came to escape the tyranny of my " "country.\nHuman: What tyranny?\nStatue: They didn’t let me speak my mind.\nHuman: What was your " "country?\nStatue: It was a country of immigrants.\nHuman: Who were the immigrants?\nStatue: They " "were from all over the world.\nHuman: What language did they speak?\nStatue: French, Spanish, " "Italian, German, English—you name it.\nHuman: And where did they come from?\nStatue: They came from " "every country in the world.\nHuman: And you were born in what country?\nStatue: I was born in " "France.\nHuman: And your parents were French?\nStatue" ], )
transformers/tests/models/opt/test_modeling_opt.py/0
{ "file_path": "transformers/tests/models/opt/test_modeling_opt.py", "repo_id": "transformers", "token_count": 10411 }
605
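# Editor's note (not part of the dataset row above or below): a condensed, runnable sketch of
# the cache-consistency pattern `create_and_check_decoder_model_past_large_inputs` exercises
# above -- run the full sequence without a cache, run only the new tokens with
# `past_key_values`, and check that the hidden states for the new positions agree. The tiny
# config values are illustrative, mirroring the tester defaults.
import torch
from transformers import OPTConfig, OPTModel

config = OPTConfig(
    vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4,
    ffn_dim=4, max_position_embeddings=50,
)
model = OPTModel(config).eval()

input_ids = torch.randint(3, config.vocab_size, (1, 7))
next_tokens = torch.randint(3, config.vocab_size, (1, 3))

with torch.no_grad():
    past = model(input_ids, use_cache=True).past_key_values
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).last_hidden_state
    incremental = model(next_tokens, past_key_values=past).last_hidden_state

# the three newly added positions should match up to numerical noise
torch.testing.assert_close(full[:, -3:], incremental, atol=1e-3, rtol=1e-3)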
# Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.image_utils import IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image if is_torchvision_available(): from transformers import PerceptionLMImageProcessorFast class PerceptionLMImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, tile_size=16, do_normalize=True, image_mean=IMAGENET_STANDARD_MEAN, image_std=IMAGENET_STANDARD_STD, do_convert_rgb=True, max_num_tiles=4, vision_input_type="thumb+tile", resample=Image.Resampling.BICUBIC, # dummy value size={"shortest_edge": 20}, # dummy value ): super().__init__() self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.tile_size = tile_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb self.max_num_tiles = max_num_tiles self.vision_input_type = vision_input_type self.resample = resample self.size = size def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "tile_size": self.tile_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, "max_num_tiles": self.max_num_tiles, "vision_input_type": self.vision_input_type, "resample": self.resample, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class PerceptionLMImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): fast_image_processing_class = PerceptionLMImageProcessorFast if is_torchvision_available() else None test_slow_image_processor = False def setUp(self): super().setUp() self.image_processor_tester = PerceptionLMImageProcessingTester(self) @property # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def 
test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "tile_size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "max_num_tiles")) self.assertTrue(hasattr(image_processing, "vision_input_type")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.tile_size, 16) self.assertEqual(image_processor.max_num_tiles, 4) self.assertEqual(image_processor.vision_input_type, "thumb+tile") image_processor = image_processing_class.from_dict( self.image_processor_dict, tile_size=42, max_num_tiles=9 ) self.assertEqual(image_processor.tile_size, 42) self.assertEqual(image_processor.max_num_tiles, 9) self.assertEqual(image_processor.vision_input_type, "thumb+tile") def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test 
batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) @unittest.skip(reason="PerceptionLMImageProcessor doesn't treat 4 channel PIL and numpy consistently yet") def test_call_numpy_4_channels(self): pass def test_nested_input(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True) # Test batched as a list of images encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (7, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched as a nested list of images, where each sublist is one batch image_inputs_nested = [image_inputs[:3], image_inputs[3:]] encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values expected_output_image_shape = (7, 5, 3, 16, 16) self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape) # Image processor should return same pixel values, independently of ipnut format self.assertTrue((encoded_images_nested == encoded_images).all())
transformers/tests/models/perception_lm/test_image_processing_perception_lm.py/0
{ "file_path": "transformers/tests/models/perception_lm/test_image_processing_perception_lm.py", "repo_id": "transformers", "token_count": 4218 }
606
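# Editor's note (not part of the dataset row above or below): a back-of-the-envelope check of
# the (batch, 5, 3, 16, 16) pixel_values shapes the PerceptionLM tests expect, assuming that
# vision_input_type="thumb+tile" yields one global thumbnail plus up to `max_num_tiles` tiles
# per image (an inference from the tester settings, not a documented guarantee).
max_num_tiles = 4
tile_size = 16
num_channels = 3
batch_size = 7

crops_per_image = max_num_tiles + 1  # 4 tiles + 1 thumbnail (assumption)
print((1, crops_per_image, num_channels, tile_size, tile_size))           # (1, 5, 3, 16, 16)
print((batch_size, crops_per_image, num_channels, tile_size, tile_size))  # (7, 5, 3, 16, 16)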
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2-VL model.""" import copy import gc import tempfile import unittest import requests from transformers import ( AutoProcessor, Qwen2VLConfig, Qwen2VLForConditionalGeneration, Qwen2VLModel, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( Expectations, backend_empty_cache, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Qwen2VLVisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, ignore_index=-100, image_size=14, bos_token_id=0, eos_token_id=1, pad_token_id=2, vision_start_token_id=3, image_token_id=4, video_token_id=5, hidden_act="silu", hidden_size=32, vocab_size=99, intermediate_size=37, max_position_embeddings=512, max_window_layers=3, model_type="qwen2_vl", num_attention_heads=4, num_hidden_layers=4, num_key_value_heads=2, rope_theta=10000, tie_word_embeddings=True, is_training=True, vision_config={ "depth": 2, "embed_dim": 32, "hidden_act": "quick_gelu", "hidden_size": 32, "mlp_ratio": 4, "num_heads": 4, "patch_size": 14, "spatial_merge_size": 1, "temporal_patch_size": 2, }, rope_scaling={"type": "mrope", "mrope_section": [2, 1, 1]}, ): self.parent = parent self.ignore_index = ignore_index self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.vision_start_token_id = vision_start_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.hidden_act = hidden_act self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.max_window_layers = max_window_layers self.model_type = model_type self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.num_key_value_heads = num_key_value_heads self.rope_theta = rope_theta self.tie_word_embeddings = tie_word_embeddings self.vision_config = vision_config self.rope_scaling = rope_scaling self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.vocab_size = vocab_size self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return Qwen2VLConfig( hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings, vision_config=self.vision_config, model_type=self.model_type, 
max_window_layers=self.max_window_layers, rope_scaling=self.rope_scaling, tie_word_embeddings=self.tie_word_embeddings, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, vision_start_token_id=self.vision_start_token_id, image_token_id=self.image_token_id, video_token_id=self.video_token_id, vocab_size=self.vocab_size, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id attention_mask[:, -1] = 0 input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id input_ids[:, self.num_image_tokens - 1] = self.vision_start_token_id inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size, device=torch_device), "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Qwen2VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen2VLForConditionalGeneration`. """ all_model_classes = ( ( Qwen2VLModel, Qwen2VLForConditionalGeneration, ) if is_torch_available() else () ) pipeline_model_mapping = {"image-text-to-text": Qwen2VLForConditionalGeneration} test_pruning = False test_head_masking = False _is_composite = True def setUp(self): self.model_tester = Qwen2VLVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2VLConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompt has multiple image tokens. 
""" config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) curr_input_dict = copy.deepcopy(input_dict) _ = model(**curr_input_dict) # successfull forward with no modifications # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-one_img_length:, ...] curr_input_dict["image_grid_thw"] = curr_input_dict["image_grid_thw"][-1:, ...] with self.assertRaises(ValueError): _ = model(**curr_input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = curr_input_dict["input_ids"][:1] pixel_values = curr_input_dict["pixel_values"][:one_img_length] image_grid_thw = curr_input_dict["image_grid_thw"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) _ = model(input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw) def test_forward_with_rope_deltas_cached(self): """ Tests that Qwen2-VL computes new rope deltas every forward pass with new set of inputs. Rope deltas are cached when we generate and re-used for decoding phase, byt are not reset automatically after generation ends. See https://github.com/huggingface/transformers/pull/36013 for more """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) # Generate and make sure rope_deltas are not `None` self.assertTrue(model.model.rope_deltas is None) generation_output = model.generate( **input_dict, max_new_tokens=4, return_dict_in_generate=True, output_logits=True ) self.assertTrue(model.model.rope_deltas is not None) # Now if we try to do forward pass, we should get new rope logits, because cache is not passed forward_output = model(**input_dict) torch.testing.assert_close( generation_output.logits[0], forward_output.logits[:, -1, :], rtol=1e-4, atol=1e-4 ) def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, 
attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) # flatten padfree_inputs_dict = { "pixel_values": inputs_dict["pixel_values"], "image_grid_thw": inputs_dict["image_grid_thw"], "input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0), } # add position_ids vision_position_ids, deltas = model.model.get_rope_index( input_ids=inputs_dict["input_ids"], image_grid_thw=inputs_dict["image_grid_thw"], attention_mask=inputs_dict["attention_mask"], ) # [3, bs, padded-seq-len] vision_padfree_positions = vision_position_ids[:, dummy_attention_mask.bool()].view( 3, -1 ) # [3, bs*padfree-len] text_padfree_positions = torch.cat( [torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()] ) # [1, bs*padfree-len] text_padfree_positions = text_padfree_positions.long().unsqueeze(0).to(torch_device) padfree_inputs_dict["position_ids"] = torch.cat([text_padfree_positions, vision_padfree_positions])[ :, None, : ] if fa_kwargs: cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist() cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device) max_length = cu_seq_lens.diff().max().item() padfree_inputs_dict.update( { "cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "max_length_q": max_length, "max_length_k": max_length, } ) # We need to do simple forward without cache in roder to trigger packed SDPA/FLEX/EAGER path res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) @unittest.skip(reason="Feedforward chunking is not yet supported") def test_feed_forward_chunking(self): pass @unittest.skip(reason="CPU offload is not yet supported") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_bin(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. 
Skip for now.") def test_model_parallelism(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2VL models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="We cannot configure to output a smaller model.") def test_model_is_small(self): pass @require_torch class Qwen2VLIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct") self.messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What kind of dog is this?"}, ], } ] url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg" self.image = Image.open(requests.get(url, stream=True).raw) def tearDown(self): gc.collect() backend_empty_cache(torch_device) @slow def test_small_model_integration_test(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt") expected_input_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151652, 151655, 151655] # fmt: skip assert expected_input_ids == inputs.input_ids[0].tolist()[:17] expected_pixel_slice = torch.tensor( [ [0.8792, 0.8792, 0.9084], [1.1858, 1.1858, 1.2296], [1.2004, 1.2004, 1.2150], [1.4340, 1.4340, 1.4194], [1.3902, 1.4048, 1.4194], [1.5216, 1.5362, 1.5362], ], dtype=torch.float32, device="cpu", ) assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) # verify generation inputs = inputs.to(torch_device) output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular choices', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_expand(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt").to(torch_device) output = model.generate(**inputs, max_new_tokens=30, num_return_sequences=3) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_wo_image(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am a large language model created by Alibaba Cloud. I am called Qwen.' 
] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_different_resolutions(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) image2 = self.image.resize((224, 224)) inputs = self.processor(text=[text, text2], images=[self.image, image2], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) DECODED_TEXT = self.processor.batch_decode(output, skip_special_tokens=True) EXPECTED_DECODED_TEXTS = Expectations( { ("xpu", 3): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', ], ("cuda", None): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets', ], ("cuda", 8): [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices' ], } ) # fmt: skip EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() self.assertEqual(DECODED_TEXT, EXPECTED_DECODED_TEXT) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_flashatt2(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular choices", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = Qwen2VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2-VL-7B-Instruct", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am a large language model created by Alibaba Cloud. I am called Qwen.' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
transformers/tests/models/qwen2_vl/test_modeling_qwen2_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_vl/test_modeling_qwen2_vl.py", "repo_id": "transformers", "token_count": 12887 }
607
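The Qwen2-VL record above repeatedly exercises the same chat-template plus image-processor generation path. Below is a minimal sketch of that pattern, assuming AutoProcessor for the checkpoint, a locally available image file, and a message layout with an image placeholder; none of these specifics are spelled out in the record itself, so treat them as illustrative assumptions rather than the test suite's own fixtures.

from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-7B-Instruct", dtype="auto", device_map="auto"
)

# Assumed message layout: an image placeholder followed by the question text.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "What kind of dog is this?"}]},
]
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image = Image.open("dog.jpg")  # placeholder path; any local RGB image works

inputs = processor(text=[text], images=[image], return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(output, skip_special_tokens=True)[0])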
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RoFormer model.""" import unittest from transformers import RoFormerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerSelfAttention, RoFormerSinusoidalPositionalEmbedding, ) class RoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return RoFormerConfig( 
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = RoFormerModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_generate_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = RoFormerForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=15, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=15, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = 
RoFormerForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = RoFormerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = RoFormerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = RoFormerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, 
-1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class RoFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( RoFormerModel, RoFormerForMaskedLM, RoFormerForCausalLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, ) if is_torch_available() else () ) # Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": RoFormerModel, "fill-mask": RoFormerForMaskedLM, "question-answering": RoFormerForQuestionAnswering, "text-classification": RoFormerForSequenceClassification, "text-generation": RoFormerForCausalLM, "token-classification": RoFormerForTokenClassification, "zero-shot": RoFormerForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = RoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_generate_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_generate_causal_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): model_name = "junnyu/roformer_chinese_small" model = RoFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch class RoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) with torch.no_grad(): output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above. expected_slice = torch.tensor( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @require_torch class RoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6) emb1._init_weight() emb1 = emb1.to(torch_device) emb = emb1(input_ids.shape) desired_weights = torch.tensor( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ).to(torch_device) self.assertTrue( torch.allclose(emb, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_positional_emb_weights_against_roformer(self): desired_weights = torch.tensor( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ).to(torch_device) emb1 = RoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512).to(torch_device) emb1._init_weight() weights = emb1.weight.data[:3, :5].to(torch_device) self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) @require_torch class RoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = ( torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) key_layer = ( -torch.arange(2 * 12 * 16 * 64, dtype=torch.float, device=torch_device).reshape(2, 12, 16, 64) / 100 ).to(torch_device) embed_positions = 
RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64) embed_positions._init_weight() embed_positions = embed_positions.to(torch_device) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = RoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = torch.tensor( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ).to(torch_device) desired_key_layer = torch.tensor( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ).to(torch_device) self.assertTrue( torch.allclose(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_query_layer}\ngot:\n{query_layer}\n", ) self.assertTrue( torch.allclose(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance), msg=f"\nexp:\n{desired_key_layer}\ngot:\n{key_layer}\n", )
transformers/tests/models/roformer/test_modeling_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_roformer.py", "repo_id": "transformers", "token_count": 11106 }
608
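The RoFormer record above checks the sinusoidal position table and the rotary application helper directly. A minimal sketch of those two calls on random tensors, using only the classes the tests themselves import, with shapes taken from the test (batch 2, 12 heads, sequence length 16, head dimension 64):

import torch
from transformers.models.roformer.modeling_roformer import (
    RoFormerSelfAttention,
    RoFormerSinusoidalPositionalEmbedding,
)

# Sinusoidal table covering up to 32 positions with a 64-dim rotary embedding
embed_positions = RoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
embed_positions._init_weight()

# Query/key layers shaped (batch, heads, seq_len, head_dim)
query_layer = torch.randn(2, 12, 16, 64)
key_layer = torch.randn(2, 12, 16, 64)

# Look up positions for a sequence of length 16 and broadcast over batch and heads
sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
query_rot, key_rot = RoFormerSelfAttention.apply_rotary_position_embeddings(
    sinusoidal_pos, query_layer, key_layer
)
print(query_rot.shape, key_rot.shape)  # torch.Size([2, 12, 16, 64]) for both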
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SAM2 model.""" import gc import tempfile import unittest import requests from transformers import ( Sam2Config, Sam2HieraDetConfig, Sam2MaskDecoderConfig, Sam2Processor, Sam2PromptEncoderConfig, Sam2VisionConfig, pipeline, ) from transformers.testing_utils import ( backend_empty_cache, require_torch, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from transformers.video_utils import load_video from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Sam2Model, Sam2Processor, Sam2VisionModel if is_vision_available(): from PIL import Image class Sam2VisionModelTester: def __init__( self, parent, hidden_size=12, embed_dim_per_stage=[12, 24, 48, 96], num_attention_heads_per_stage=[1, 2, 4, 8], num_channels=3, image_size=128, patch_kernel_size=7, patch_stride=4, patch_padding=3, batch_size=2, blocks_per_stage=[1, 2, 7, 2], backbone_channel_list=[96, 48, 24, 12], backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]], fpn_hidden_size=32, is_training=False, ): self.parent = parent self.hidden_size = hidden_size self.image_size = image_size self.num_channels = num_channels self.patch_kernel_size = patch_kernel_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.batch_size = batch_size self.is_training = is_training self.blocks_per_stage = blocks_per_stage self.embed_dim_per_stage = embed_dim_per_stage self.num_attention_heads_per_stage = num_attention_heads_per_stage self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size def get_config(self): backbone_config = Sam2HieraDetConfig( hidden_size=self.hidden_size, num_channels=self.num_channels, image_size=self.image_size, patch_stride=self.patch_stride, patch_kernel_size=self.patch_kernel_size, patch_padding=self.patch_padding, blocks_per_stage=self.blocks_per_stage, embed_dim_per_stage=self.embed_dim_per_stage, num_attention_heads_per_stage=self.num_attention_heads_per_stage, ) return Sam2VisionConfig( backbone_config=backbone_config, backbone_channel_list=self.backbone_channel_list, backbone_feature_sizes=self.backbone_feature_sizes, fpn_hidden_size=self.fpn_hidden_size, ) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def create_and_check_model(self, config, pixel_values): model = Sam2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) output_size = self.image_size // self.patch_stride // (2 * len(self.blocks_per_stage)) output_channels = 
self.hidden_size * 2 * len(self.blocks_per_stage) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, output_size, output_size, output_channels) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Sam2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Sam2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False test_torch_exportable = True def setUp(self): self.model_tester = Sam2VisionModelTester(self) self.config_tester = ConfigTester(self, config_class=Sam2VisionConfig, has_text_modality=False) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Overriding as attention shape depends on window_size def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.blocks_per_stage) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size = config.backbone_config.window_size_per_stage[0] out_dim = config.backbone_config.hidden_size patch_stride = config.backbone_config.patch_stride num_windows = ( self.model_tester.batch_size * (config.backbone_config.image_size // (window_size * patch_stride)) ** 2 ) model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-4:]), [num_windows, window_size, window_size, out_dim], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = 
True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-4:]), [num_windows, window_size, window_size, out_dim], ) # Overriding as attention shape depends on window_size def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = sum(self.model_tester.blocks_per_stage) + 1 self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-4:]), [ self.model_tester.batch_size, self.model_tester.image_size // self.model_tester.patch_stride, self.model_tester.image_size // self.model_tester.patch_stride, self.model_tester.hidden_size, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = self.model_tester.image_size for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class, image_size) # Override as diffence slightly higher than the threshold def test_batching_equivalence(self, atol=5e-4, rtol=5e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="SAM model can't be compiled dynamic yet") class Sam2PromptEncoderTester: def __init__( self, hidden_size=32, input_image_size=128, patch_size=16, mask_input_channels=8, num_point_embeddings=4, hidden_act="gelu", ): self.hidden_size = hidden_size self.input_image_size = input_image_size self.patch_size = patch_size self.mask_input_channels = mask_input_channels self.num_point_embeddings = num_point_embeddings self.hidden_act = hidden_act def get_config(self): return Sam2PromptEncoderConfig( image_size=self.input_image_size, patch_size=self.patch_size, mask_input_channels=self.mask_input_channels, hidden_size=self.hidden_size, num_point_embeddings=self.num_point_embeddings, hidden_act=self.hidden_act, ) def prepare_config_and_inputs(self): dummy_points = floats_tensor([self.batch_size, 3, 2]) config = self.get_config() return config, dummy_points class Sam2MaskDecoderTester: def __init__( self, hidden_size=32, hidden_act="relu", mlp_dim=64, num_hidden_layers=2, num_attention_heads=4, attention_downsample_rate=2, num_multimask_outputs=3, iou_head_depth=3, iou_head_hidden_dim=32, ): self.hidden_size = hidden_size self.hidden_act = hidden_act self.mlp_dim = mlp_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.attention_downsample_rate = attention_downsample_rate self.num_multimask_outputs = num_multimask_outputs self.iou_head_depth = iou_head_depth self.iou_head_hidden_dim = iou_head_hidden_dim def get_config(self): return Sam2MaskDecoderConfig( hidden_size=self.hidden_size, hidden_act=self.hidden_act, mlp_dim=self.mlp_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, 
attention_downsample_rate=self.attention_downsample_rate, num_multimask_outputs=self.num_multimask_outputs, iou_head_depth=self.iou_head_depth, iou_head_hidden_dim=self.iou_head_hidden_dim, ) def prepare_config_and_inputs(self): config = self.get_config() dummy_inputs = { "image_embedding": floats_tensor([self.batch_size, self.hidden_size]), } return config, dummy_inputs class Sam2ModelTester: def __init__( self, parent, num_channels=3, image_size=128, hidden_size=12, patch_kernel_size=7, patch_stride=4, patch_padding=3, blocks_per_stage=[1, 2, 7, 2], embed_dim_per_stage=[12, 24, 48, 96], backbone_channel_list=[96, 48, 24, 12], backbone_feature_sizes=[[32, 32], [16, 16], [8, 8]], fpn_hidden_size=32, memory_encoder_hidden_size=32, batch_size=2, is_training=False, ): self.parent = parent self.image_size = image_size self.hidden_size = hidden_size self.patch_kernel_size = patch_kernel_size self.patch_stride = patch_stride self.patch_padding = patch_padding self.blocks_per_stage = blocks_per_stage self.embed_dim_per_stage = embed_dim_per_stage self.backbone_channel_list = backbone_channel_list self.backbone_feature_sizes = backbone_feature_sizes self.fpn_hidden_size = fpn_hidden_size self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.memory_encoder_hidden_size = memory_encoder_hidden_size self.prompt_encoder_tester = Sam2PromptEncoderTester() self.mask_decoder_tester = Sam2MaskDecoderTester() def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): backbone_config = Sam2HieraDetConfig( hidden_size=self.hidden_size, num_channels=self.num_channels, image_size=self.image_size, patch_stride=self.patch_stride, patch_kernel_size=self.patch_kernel_size, patch_padding=self.patch_padding, blocks_per_stage=self.blocks_per_stage, embed_dim_per_stage=self.embed_dim_per_stage, ) vision_config = Sam2VisionConfig( backbone_config=backbone_config, backbone_channel_list=self.backbone_channel_list, backbone_feature_sizes=self.backbone_feature_sizes, fpn_hidden_size=self.fpn_hidden_size, ) prompt_encoder_config = self.prompt_encoder_tester.get_config() mask_decoder_config = self.mask_decoder_tester.get_config() return Sam2Config( vision_config=vision_config, prompt_encoder_config=prompt_encoder_config, mask_decoder_config=mask_decoder_config, memory_attention_hidden_size=self.hidden_size, memory_encoder_hidden_size=self.memory_encoder_hidden_size, image_size=self.image_size, mask_downsampler_embed_dim=32, memory_fuser_embed_dim=32, memory_attention_num_layers=1, memory_attention_feed_forward_hidden_size=32, ) def create_and_check_model(self, config, pixel_values): model = Sam2Model(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.iou_scores.shape, (self.batch_size, 1, 3)) self.parent.assertEqual(result.pred_masks.shape[:3], (self.batch_size, 1, 3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Sam2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SAM's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (Sam2Model,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Sam2Model, "mask-generation": Sam2Model} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False _is_composite = True def setUp(self): self.model_tester = Sam2ModelTester(self) common_properties = ["initializer_range"] self.config_tester = ConfigTester( self, config_class=Sam2Config, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SAM's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # Overriding as attention shape depends on window_size def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.vision_attentions expected_num_attentions = sum(self.model_tester.blocks_per_stage) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.mask_decoder_config.output_attentions = True config.vision_config.output_attentions = True config.output_attentions = True model = model_class._from_config(config, attn_implementation="eager") window_size = config.vision_config.backbone_config.window_size_per_stage[0] out_dim = self.model_tester.hidden_size patch_stride = self.model_tester.patch_stride num_windows = ( self.model_tester.batch_size * (self.model_tester.image_size // (window_size * patch_stride)) ** 2 ) model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.vision_attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-4:]), [num_windows, window_size, window_size, out_dim], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.vision_attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-4:]), [num_windows, window_size, window_size, out_dim], ) # Override as Sam2Model has different sub-modules def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested 
to do so when loading the model. This is tested only by looking at layer names, as SDPA layers are usually called "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model is loaded, because we manually replicate requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info. The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to override this test. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa") model_sdpa = model_sdpa.eval().to(torch_device) vision_encoder_sdpa = getattr(model_sdpa, "vision_encoder") mask_decoder_sdpa = getattr(model_sdpa, "mask_decoder") # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(mask_decoder_sdpa.config._attn_implementation == "sdpa") self.assertTrue(vision_encoder_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(getattr(model_eager, "mask_decoder").config._attn_implementation == "eager") self.assertTrue(getattr(model_eager, "vision_encoder").config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") # Override as Sam2Model doesn't have hidden states def flash_attn_inference_equivalence(self, attn_implementation: str, padding_side: str): r""" Tests the equivalence between the eager and flash attention implementations. This test is only for inference and runs with `dtype=torch.bfloat16`.

""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if (attn_implementation == "flash_attention_2" and not model_class._supports_flash_attn_2) or ( attn_implementation == "flash_attention_3" and not model_class._supports_flash_attn_3 ): self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] if padding_side == "left": dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 else: dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = outputs.vision_hidden_states[-1] logits_fa = outputs_fa.vision_hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = outputs.vision_hidden_states[-1] logits_fa = outputs_fa.vision_hidden_states[-1] if padding_side == "left": assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(dummy_input, **other_inputs) else: assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) # Override as diffence slightly higher than the threshold def test_batching_equivalence(self, atol=5e-4, rtol=5e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) @unittest.skip(reason="Sam2Model does not support training") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Hidden_states is tested in sub modules tests") def test_hidden_states_output(self): pass @slow def test_model_from_pretrained(self): model_name = "facebook/sam2.1-hiera-tiny" model = Sam2Model.from_pretrained(model_name) self.assertIsNotNone(model) def test_sdpa_can_compile_dynamic(self): self.skipTest(reason="SAM2 model can't be compiled dynamic yet") def prepare_image(): img_url = 
"https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_groceries_image(): img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/groceries.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_dog_img(): img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/dog-sam.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_video(): video_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/bedroom.mp4" raw_video, _ = load_video(video_url) return raw_video @slow class Sam2ModelIntegrationTest(unittest.TestCase): def setUp(self): super().setUp() self.model = Sam2Model.from_pretrained("facebook/sam2.1-hiera-tiny").to(torch.float32) self.processor = Sam2Processor.from_pretrained("facebook/sam2.1-hiera-tiny") self.model.to(torch_device) self.model.eval() def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) def test_inference_mask_generation_one_point_multimask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs) self.assertEqual(outputs.iou_scores.shape, (1, 1, 3)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 3, 256, 256)) sorted_indices = torch.argsort(outputs.iou_scores.squeeze(), descending=True) scores = outputs.iou_scores.squeeze()[sorted_indices] masks_logits = outputs.pred_masks.squeeze()[sorted_indices][0, :3, :3] torch.testing.assert_close( scores, torch.tensor([0.9547, 0.4932, 0.0427]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits, torch.tensor( [[-24.9288, -41.7466, -31.0128], [-34.5113, -31.1054, -36.5913], [-25.2597, -37.5912, -33.4030]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_one_point_no_multimask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.9364]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor( [[-7.0462, -13.3857, -9.6419], [-10.4565, -9.7174, -12.3528], [-7.3704, -12.4391, -10.5539]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_batched_images_multi_points(self): raw_image1 = prepare_image() raw_image2 = prepare_dog_img() input_points = [[[[500, 375]]], [[[770, 200], [730, 120]]]] input_labels = [[[1]], [[1, 0]]] inputs = self.processor( images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = 
self.model(**inputs) self.assertEqual(outputs.iou_scores.shape, (2, 1, 3)) self.assertEqual(outputs.pred_masks.shape, (2, 1, 3, 256, 256)) sorted_indices = torch.argsort(outputs.iou_scores[0].squeeze(), descending=True) scores1 = outputs.iou_scores[0].squeeze()[sorted_indices] masks_logits1 = outputs.pred_masks[0].squeeze()[sorted_indices][0, :3, :3] sorted_indices = torch.argsort(outputs.iou_scores[1].squeeze(), descending=True) scores2 = outputs.iou_scores[1].squeeze()[sorted_indices] masks_logits2 = outputs.pred_masks[1].squeeze()[sorted_indices][0, :3, :3] torch.testing.assert_close( scores1, torch.tensor([0.9586, 0.4913, 0.0448]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits1, torch.tensor( [[-22.2555, -37.9250, -27.8928], [-30.8681, -27.9519, -32.8032], [-22.4133, -33.9966, -29.7111]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( scores2, torch.tensor([0.9504, 0.8117, 0.7426]).to(torch_device), atol=1e-4, rtol=1e-4 ) torch.testing.assert_close( masks_logits2, torch.tensor( [[-13.1182, -17.3217, -14.9651], [-16.2372, -12.7739, -17.6346], [-13.5013, -17.1549, -15.6614]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_batched_images_batched_points_multi_points(self): raw_image1 = prepare_image() raw_image2 = prepare_groceries_image() input_points = [[[[500, 375]], [[650, 750]]], [[[400, 300]], [[630, 300], [550, 300]]]] input_labels = [[[1], [1]], [[1], [1, 1]]] inputs = self.processor( images=[raw_image1, raw_image2], input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (2, 2, 1)) self.assertEqual(outputs.pred_masks.shape, (2, 2, 1, 256, 256)) torch.testing.assert_close( outputs.iou_scores, torch.tensor([[[0.9500], [0.9718]], [[0.9568], [0.9114]]]).to(torch_device), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( outputs.pred_masks[:, :, :, :2, :2], torch.tensor( [ [[[[-5.8131, -11.3020], [-8.6487, -8.0690]]], [[[-4.7731, -8.7606], [-6.2399, -7.0738]]]], [[[[-13.8661, -19.1254], [-20.2477, -14.1636]]], [[[-8.8229, -10.2760], [-11.3797, -8.7189]]]], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_batched_images_batched_boxes(self): raw_image1 = prepare_image() raw_image2 = prepare_groceries_image() input_boxes = [ [[75, 275, 1725, 850], [425, 600, 700, 875], [1375, 550, 1650, 800], [1240, 675, 1400, 750]], [[450, 170, 520, 350], [350, 190, 450, 350], [500, 170, 580, 350], [580, 170, 640, 350]], ] inputs = self.processor(images=[raw_image1, raw_image2], input_boxes=input_boxes, return_tensors="pt").to( torch_device ) with torch.no_grad(): outputs = self.model(**inputs, multimask_output=False) self.assertEqual(outputs.iou_scores.shape, (2, 4, 1)) self.assertEqual(outputs.pred_masks.shape, (2, 4, 1, 256, 256)) torch.testing.assert_close( outputs.iou_scores, torch.tensor([[[0.9873], [0.9264], [0.9496], [0.9208]], [[0.9445], [0.9496], [0.9497], [0.9481]]]).to( torch_device ), atol=1e-4, rtol=1e-4, ) torch.testing.assert_close( outputs.pred_masks[:, :, :, :2, :2], torch.tensor( [ [ [[[-7.6204, -11.9286], [-8.7747, -10.5662]]], [[[-17.1070, -23.4025], [-20.9608, -19.5600]]], [[[-20.5766, -29.4410], [-26.0739, -24.3225]]], [[[-19.7201, -29.0836], [-24.4915, -23.6377]]], ], [ [[[-18.5259, -23.5202], [-25.1906, -17.2518]]], [[[-20.1214, -25.4215], [-25.7877, -19.1169]]], [[[-21.0878, -24.7938], [-27.5625, -19.2650]]], 
[[[-20.5210, -22.5343], [-26.0968, -17.7544]]], ], ] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_inference_mask_generation_from_existing_points_and_mask(self): raw_image = prepare_image() input_points = [[[[500, 375]]]] input_labels = [[[1]]] original_inputs = self.processor( images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = self.model(**original_inputs) # best mask to use as input for new points mask_input = outputs.pred_masks[:, :, torch.argmax(outputs.iou_scores)] new_input_points = [[[[500, 375], [1125, 625]]]] new_input_labels = [[[1, 1]]] inputs = self.processor( input_points=new_input_points, input_labels=new_input_labels, original_sizes=original_inputs["original_sizes"], return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = self.model( **inputs, input_masks=mask_input, image_embeddings=outputs.image_embeddings, multimask_output=False, ) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.9738]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor([[-5.3899, -9.7908, -8.4931], [-5.5144, -8.8731, -8.3000], [-5.5976, -9.9249, -9.0761]]).to( torch_device ), atol=1e-4, rtol=1e-4, ) # with negative point new_input_points = [[[[500, 375], [1125, 625]]]] new_input_labels = [[[1, 0]]] inputs = self.processor( input_points=new_input_points, input_labels=new_input_labels, original_sizes=original_inputs["original_sizes"], return_tensors="pt", ).to(torch_device) with torch.no_grad(): outputs = self.model( **inputs, input_masks=mask_input, image_embeddings=outputs.image_embeddings, multimask_output=False, ) self.assertEqual(outputs.iou_scores.shape, (1, 1, 1)) self.assertEqual(outputs.pred_masks.shape, (1, 1, 1, 256, 256)) scores = outputs.iou_scores.squeeze((0, 1)) masks_logits = outputs.pred_masks.squeeze((0, 1))[0, :3, :3] torch.testing.assert_close(scores, torch.tensor([0.9719]).to(torch_device), atol=1e-4, rtol=1e-4) torch.testing.assert_close( masks_logits, torch.tensor( [[-15.5081, -21.8641, -18.0479], [-17.4401, -17.4754, -23.6469], [-14.3975, -19.4346, -18.5884]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) def test_dummy_pipeline_generation(self): generator = pipeline("mask-generation", model="facebook/sam2.1-hiera-tiny", device=torch_device) raw_image = prepare_image() _ = generator(raw_image, points_per_batch=64)
transformers/tests/models/sam2/test_modeling_sam2.py/0
{ "file_path": "transformers/tests/models/sam2/test_modeling_sam2.py", "repo_id": "transformers", "token_count": 20228 }
609
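The SAM2 integration tests above all follow the same point-prompted flow. A minimal sketch of that flow with a single positive point, reusing the checkpoint and fixture image URL from the tests; post-processing of the low-resolution mask logits back to image size is omitted here.

import requests
import torch
from PIL import Image
from transformers import Sam2Model, Sam2Processor

processor = Sam2Processor.from_pretrained("facebook/sam2.1-hiera-tiny")
model = Sam2Model.from_pretrained("facebook/sam2.1-hiera-tiny").eval()

img_url = "https://huggingface.co/datasets/hf-internal-testing/sam2-fixtures/resolve/main/truck.jpg"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

# Nesting is [image][object][point][x, y]; label 1 marks a positive (foreground) point
input_points = [[[[500, 375]]]]
input_labels = [[[1]]]
inputs = processor(images=raw_image, input_points=input_points, input_labels=input_labels, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Three candidate masks per prompt, each with a predicted IoU score
print(outputs.iou_scores.shape)  # torch.Size([1, 1, 3])
print(outputs.pred_masks.shape)  # torch.Size([1, 1, 3, 256, 256])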
# Copyright 2025 Bytedance-Seed Ltd and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch SeedOss model."""

import unittest

import pytest

from transformers import AutoModelForCausalLM, AutoTokenizer, SeedOssConfig, is_torch_available
from transformers.testing_utils import (
    cleanup,
    require_flash_attn,
    require_torch,
    require_torch_large_accelerator,
    require_torch_large_gpu,
    slow,
    torch_device,
)

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester


if is_torch_available():
    import torch

    from transformers import (
        SeedOssForCausalLM,
        SeedOssForQuestionAnswering,
        SeedOssForSequenceClassification,
        SeedOssForTokenClassification,
        SeedOssModel,
    )


class SeedOssModelTester(CausalLMModelTester):
    if is_torch_available():
        config_class = SeedOssConfig
        base_model_class = SeedOssModel
        causal_lm_class = SeedOssForCausalLM
        sequence_classification_class = SeedOssForSequenceClassification
        token_classification_class = SeedOssForTokenClassification
        question_answering_class = SeedOssForQuestionAnswering


@require_torch
class SeedOssModelTest(CausalLMModelTest, unittest.TestCase):
    model_tester_class = SeedOssModelTester
    all_model_classes = (
        (
            SeedOssModel,
            SeedOssForCausalLM,
            SeedOssForSequenceClassification,
            SeedOssForTokenClassification,
            SeedOssForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SeedOssModel,
            "text-classification": SeedOssForSequenceClassification,
            "token-classification": SeedOssForTokenClassification,
            "text-generation": SeedOssForCausalLM,
            "zero-shot": SeedOssForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    _is_stateful = True
    model_split_percents = [0.5, 0.6]


@slow
@require_torch_large_accelerator
class SeedOssIntegrationTest(unittest.TestCase):
    input_text = ["How to make pasta?", "Hi ByteDance-Seed"]
    model_id = "ByteDance-Seed/Seed-OSS-36B-Base"

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    def test_model_36b_fp16(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on my local machine. I have installed all the",
        ]

        model = AutoModelForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.float16, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_36b_bf16(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on my local machine. I have installed all the",
        ]

        model = AutoModelForCausalLM.from_pretrained(self.model_id, torch_dtype=torch.bfloat16, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_36b_eager(self):
        EXPECTED_TEXTS = ""

        model = AutoModelForCausalLM.from_pretrained(
            self.model_id, torch_dtype=torch.bfloat16, attn_implementation="eager", device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    def test_model_36b_sdpa(self):
        EXPECTED_TEXTS = [
            "How to make pasta?\nHow to make pasta?\nPasta is a popular dish that is enjoyed by people all over",
            "Hi ByteDance-Seed team,\nI am trying to run the code on my local machine. I have installed all the",
        ]

        model = AutoModelForCausalLM.from_pretrained(
            self.model_id, torch_dtype=torch.bfloat16, attn_implementation="sdpa", device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)

    @require_flash_attn
    @require_torch_large_gpu
    @pytest.mark.flash_attn_test
    def test_model_36b_flash_attn(self):
        EXPECTED_TEXTS = ""

        model = AutoModelForCausalLM.from_pretrained(
            self.model_id, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto"
        )
        model.to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(
            model.model.embed_tokens.weight.device
        )

        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
        self.assertEqual(output_text, EXPECTED_TEXTS)
transformers/tests/models/seed_oss/test_modeling_seed_oss.py/0
{ "file_path": "transformers/tests/models/seed_oss/test_modeling_seed_oss.py", "repo_id": "transformers", "token_count": 2914 }
610
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SigLIP model.""" import inspect import os import tempfile import unittest import numpy as np import requests from parameterized import parameterized from pytest import mark from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig from transformers.testing_utils import ( is_flaky, require_flash_attn, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SiglipForImageClassification, SiglipModel, SiglipTextModel, SiglipVisionModel if is_vision_available(): from PIL import Image from transformers import SiglipProcessor class SiglipModelTesterMixin(ModelTesterMixin): def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) if hasattr(model_sdpa, "vision_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") if hasattr(model_sdpa, "text_model"): self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.text_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") class SiglipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches num_patches = (image_size // 
patch_size) ** 2 self.seq_length = num_patches # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return SiglipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = SiglipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipVisionModelTest(SiglipModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (SiglipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = SiglipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=SiglipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SIGLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) class SiglipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads 
self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return SiglipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = SiglipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SiglipTextModelTest(SiglipModelTesterMixin, unittest.TestCase): all_model_classes = (SiglipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipTextConfig, hidden_size=37) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip does not use 
inputs_embeds") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class SiglipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = SiglipTextModelTester(parent, **text_kwargs) self.vision_model_tester = SiglipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training # Copied from tests.models.clip.test_modeling_clip.CLIPModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return SiglipConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = SiglipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": False, } return config, inputs_dict @require_torch class SiglipModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase): additional_model_inputs = ["pixel_values"] all_model_classes = (SiglipModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": SiglipModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = SiglipModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in 
individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_retain_grad_hidden_states_attentions def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="SiglipModel does not have input/output embeddings") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_get_set_embeddings def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest._create_and_check_torchscript with CLIP->Siglip def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Siglip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_load_vision_text_config with CLIP->Siglip def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save SiglipConfig and check if we can load SiglipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = SiglipVisionConfig.from_pretrained(tmp_dir_name) 
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save SiglipConfig and check if we can load SiglipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = SiglipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # Test with attention mask dummy_attention_mask = inputs_dict["attention_mask"] if dummy_attention_mask is not None: dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # check with inference + dropout model.train() _ = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest("SigLIP does not support right padding") class SiglipForImageClassificationModelTester(SiglipModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = 
self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipForImageClassificationModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": SiglipForImageClassification} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = SiglipForImageClassificationModelTester(self) @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class SiglipModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) processor = SiglipProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of 2 cats", "a photo of 2 dogs"], images=image, padding="max_length", return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits_per_image = outputs.logits_per_image logits_per_text = outputs.logits_per_text # verify the logits self.assertEqual( logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-0.7567, -10.3354]], device=torch_device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) # verify the probs probs = torch.sigmoid(logits_per_image) # these are the probabilities expected_probs = 
torch.tensor([[3.1937e-01, 3.2463e-05]], device=torch_device) torch.testing.assert_close(probs, expected_probs, rtol=1e-3, atol=1e-3) @slow def test_inference_interpolate_pos_encoding(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) # 640 x 480 image image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") processor = SiglipProcessor.from_pretrained(model_name, do_resize=False, size={"height": 480, "width": 640}) inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the shape # patch size = 16 # batch size 1, (640/16) * (480/16) = 1200 patches, 768 hidden size expected_shape = torch.Size((1, 1200, 768)) self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
transformers/tests/models/siglip/test_modeling_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_modeling_siglip.py", "repo_id": "transformers", "token_count": 14372 }
611
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Speech2Text model.""" import copy import inspect import os import tempfile import unittest from datasets import load_dataset from transformers import Speech2TextConfig from transformers.testing_utils import ( is_torch_available, require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, require_torchaudio, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextProcessor from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder def prepare_speech_to_text_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_features.ne(0) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { # "input_ids": input_features, "input_features": input_features, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class Speech2TextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=32, input_feat_per_channel=24, input_channels=1, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=20, max_target_positions=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = conv_kernel_sizes self.conv_channels 
= conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_features = floats_tensor( [self.batch_size, self.seq_length, self.input_feat_per_channel], self.vocab_size ) attention_mask = torch.ones([self.batch_size, self.seq_length], dtype=torch.long, device=torch_device) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2) config = self.get_config() inputs_dict = prepare_speech_to_text_inputs_dict( config, input_features=input_features, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, ) return config, inputs_dict def get_config(self): return Speech2TextConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, num_conv_layers=self.num_conv_layers, conv_kernel_sizes=self.conv_kernel_sizes, conv_channels=self.conv_channels, input_feat_per_channel=self.input_feat_per_channel, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = Speech2TextModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, 
attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = Speech2TextModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = Speech2TextEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder( inputs_dict["input_features"], attention_mask=inputs_dict["attention_mask"] )[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = Speech2TextDecoder.from_pretrained(tmpdirname).to(torch_device) encoder_attention_mask = encoder._get_feature_vector_attention_mask( encoder_last_hidden_state.shape[1], inputs_dict["attention_mask"] ) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=encoder_attention_mask, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Speech2TextModel, Speech2TextForConditionalGeneration) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": Speech2TextForConditionalGeneration, "feature-extraction": Speech2TextModel} if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = Speech2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Speech2TextConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @unittest.skip(reason="Not implemented currently") def test_inputs_embeds(self): pass @unittest.skip(reason="Training is not supported yet") def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_features = input_dict["input_features"] attention_mask = input_dict["attention_mask"] model = Speech2TextForConditionalGeneration(config).eval().to(torch_device) input_features = input_features.half() model.half() model.generate(input_features, attention_mask=attention_mask) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = 
True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else 
outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untie embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @unittest.skip def test_generate_without_input_ids(self): pass def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward input_features = inputs["input_features"] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] traced_model = torch.jit.trace( model, (input_features, attention_mask, decoder_input_ids, decoder_attention_mask) ) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save 
module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @unittest.skip(reason="Test failing, @RocketNight is looking into it") def test_tf_from_pt_safetensors(self): pass @require_torch @require_torchaudio @require_sentencepiece @require_tokenizers class Speech2TextModelIntegrationTests(unittest.TestCase): @classmethod def setUpClass(cls): model_name = "facebook/s2t-small-librispeech-asr" cls.model = Speech2TextForConditionalGeneration.from_pretrained(model_name, device_map="auto") cls.processor = Speech2TextProcessor.from_pretrained(model_name) # loads 4 samples ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(4))[:4]["audio"] cls.dataset = [x["array"] for x in speech_samples] def test_generation_librispeech(self): input_speech = [self.dataset[0]] input_features = self.processor(input_speech, return_tensors="pt").input_features.to(torch_device) generated_ids = self.model.generate(input_features) generated_transcript = self.processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" ] self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS) def test_generation_librispeech_batched(self): input_speech = self.dataset inputs = self.processor(input_speech, return_tensors="pt", padding=True) input_features = inputs.input_features.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) generated_ids = self.model.generate(input_features, attention_mask=attention_mask) generated_transcripts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) EXPECTED_TRANSCRIPTIONS = [ "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel", "nor is mister cultar's manner less interesting than his matter", "he tells us that at this festive season of the year with christmas and roast beef looming before us" " similes drawn from eating and its results occur most readily to the mind", "he has grave doubts whether sir frederick leyton's work is really greek after all and can discover in it" " but little of rocky ithaca", ] self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
transformers/tests/models/speech_to_text/test_modeling_speech_to_text.py/0
{ "file_path": "transformers/tests/models/speech_to_text/test_modeling_speech_to_text.py", "repo_id": "transformers", "token_count": 14479 }
612