Dataset columns: text (string, 5 – 631k chars), id (string, 14 – 178 chars), metadata (dict), __index_level_0__ (int64, 0 – 647)
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DINOv3 model configuration"""

from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DINOv3ViTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DINOv3Model`]. It is used to instantiate a
    DINOv3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the DINOv3
    [facebook/dinov3-vits16-pretrain-lvd1689m](https://huggingface.co/facebook/dinov3-vits16-pretrain-lvd1689m)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        hidden_size (`int`, *optional*, defaults to 384):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 1536):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 6):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        rope_theta (`float`, *optional*, defaults to 100.0):
            The base period of the RoPE embeddings.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        query_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the query projection.
        key_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the key projection.
        value_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the value projection.
        proj_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the output projection.
        mlp_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the MLP layers.
        layerscale_value (`float`, *optional*, defaults to 1.0):
            Initial value to use for layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_gated_mlp (`bool`, *optional*, defaults to `False`):
            Whether to use the SwiGLU feedforward neural network.
        num_register_tokens (`int`, *optional*, defaults to 0):
            The number of register tokens.
        pos_embed_shift (`float`, *optional*):
            Amount to randomly shift position embedding coordinates in [-shift, shift], applied only in training mode
            if not `None`.
        pos_embed_jitter (`float`, *optional*):
            Amount to randomly jitter position embedding coordinates by a log-uniform value in [1/jitter, jitter],
            applied only in training mode if not `None`.
        pos_embed_rescale (`float`, *optional*, defaults to 2.0):
            Amount to randomly rescale position embedding coordinates by a log-uniform value in [1/rescale, rescale],
            applied only in training mode if not `None`.

    Example:

    ```python
    >>> from transformers import DINOv3ViTConfig, DINOv3ViTModel

    >>> # Initializing a DINOv3 ViT-small style configuration
    >>> config = DINOv3ViTConfig()

    >>> # Initializing a model (with random weights) from the config
    >>> model = DINOv3ViTModel(config)

    >>> # Accessing the model config
    >>> config = model.config
    ```"""

    model_type = "dinov3_vit"

    def __init__(
        self,
        patch_size: int = 16,
        hidden_size: int = 384,
        intermediate_size: int = 1536,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 6,
        hidden_act: str = "gelu",
        attention_dropout: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-5,
        rope_theta: float = 100.0,
        image_size: int = 224,
        num_channels: int = 3,
        query_bias: bool = True,
        key_bias: bool = False,
        value_bias: bool = True,
        proj_bias: bool = True,
        mlp_bias: bool = True,
        layerscale_value: float = 1.0,
        drop_path_rate: float = 0.0,
        use_gated_mlp: bool = False,
        num_register_tokens: int = 0,
        # train augs
        pos_embed_shift: Optional[float] = None,
        pos_embed_jitter: Optional[float] = None,
        pos_embed_rescale: Optional[float] = 2.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layerscale_value = layerscale_value
        self.drop_path_rate = drop_path_rate
        self.use_gated_mlp = use_gated_mlp
        self.rope_theta = rope_theta
        self.query_bias = query_bias
        self.key_bias = key_bias
        self.value_bias = value_bias
        self.proj_bias = proj_bias
        self.mlp_bias = mlp_bias
        self.num_register_tokens = num_register_tokens
        # train augs
        self.pos_embed_shift = pos_embed_shift
        self.pos_embed_jitter = pos_embed_jitter
        self.pos_embed_rescale = pos_embed_rescale


__all__ = ["DINOv3ViTConfig"]
transformers/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py/0
{ "file_path": "transformers/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py", "repo_id": "transformers", "token_count": 2908 }
466
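As a quick illustration of the configuration above, here is a minimal sketch that overrides the ViT-Small defaults. The argument names come from the config class in this row; the chosen sizes are purely illustrative and do not correspond to an official DINOv3 checkpoint.

```python
from transformers import DINOv3ViTConfig

# Hypothetical ViT-Base-style override of the ViT-Small defaults above;
# the values are illustrative, not an official DINOv3 variant.
config = DINOv3ViTConfig(
    hidden_size=768,
    intermediate_size=3072,
    num_attention_heads=12,
    use_gated_mlp=True,      # switch the feed-forward to SwiGLU
    num_register_tokens=4,   # add register tokens
)

# The attention head width must divide the hidden size evenly.
assert config.hidden_size % config.num_attention_heads == 0
print(config.model_type)  # "dinov3_vit"
```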
import argparse
import json
import os
import re

import torch
from safetensors.torch import load_file

from transformers import DogeConfig, DogeForCausalLM


# fmt: off
# `None` means we drop the key
STATE_DICT_MAPPING = {
    # CausalLM keys
    r"^lm_head.weight": r"lm_head.weight",

    # Model keys
    r"^model.word_embed.weight": r"model.embed_tokens.weight",
    r"^model.rotary_emb.rotary_emb": r"model.rotary_emb.rotary_emb",
    r"^model.final_layernorm.weight": r"model.norm.weight",

    # Layers keys
    r"^model.layers.(\d+).pre_layernorm.weight": r"model.layers.\1.input_layernorm.weight",
    r"^model.layers.(\d+).pre_residual.weight": r"model.layers.\1.input_residual",
    r"^model.layers.(\d+).post_layernorm.weight": r"model.layers.\1.post_attention_layernorm.weight",
    r"^model.layers.(\d+).post_residual.weight": r"model.layers.\1.post_attention_residual",

    # Attention keys
    r"^model.layers.(\d+).self_attn.q_proj.weight": r"model.layers.\1.self_attn.q_proj.weight",
    r"^model.layers.(\d+).self_attn.k_proj.weight": r"model.layers.\1.self_attn.k_proj.weight",
    r"^model.layers.(\d+).self_attn.v_proj.weight": r"model.layers.\1.self_attn.v_proj.weight",
    r"^model.layers.(\d+).self_attn.A": r"model.layers.\1.self_attn.A",
    r"^model.layers.(\d+).self_attn.dt_proj.weight": r"model.layers.\1.self_attn.dt_proj.weight",
    r"^model.layers.(\d+).self_attn.o_proj.weight": r"model.layers.\1.self_attn.o_proj.weight",

    # Feedforward keys
    r"^model.layers.(\d+).feed_forward.gate_proj.weight": r"model.layers.\1.mlp.gate_proj.weight",
    r"^model.layers.(\d+).feed_forward.up_proj.weight": r"model.layers.\1.mlp.up_proj.weight",
    r"^model.layers.(\d+).feed_forward.down_proj.weight": r"model.layers.\1.mlp.down_proj.weight",
    r"^model.layers.(\d+).feed_forward.router_gate.weight": r"model.layers.\1.mlp.router_gate.weight",
    r"^model.layers.(\d+).feed_forward.router_gate.bias": None,
    r"^model.layers.(\d+).feed_forward.down_embed.weight": r"model.layers.\1.mlp.down_embed.weight",
    r"^model.layers.(\d+).feed_forward.up_embed.weight": r"model.layers.\1.mlp.up_embed.weight",
}
# fmt: on


def load_weights(input_dir: str):
    safetensor_files = [os.path.join(input_dir, x) for x in os.listdir(input_dir) if x.endswith(".safetensors")]

    all_weights = {}

    if safetensor_files:
        if len(safetensor_files) == 1:
            tensors = load_file(safetensor_files[0])
            all_weights.update(tensors)
            return all_weights

        safetensor_files = sorted(safetensor_files, key=lambda x: int(x.rsplit("-", 3)[1]))
        for file in safetensor_files:
            tensors = load_file(file)
            all_weights.update(tensors)
        return all_weights
    else:
        raise ValueError("No .safetensors files found in the specified directory.")


def map_old_key_to_new(old_key):
    for pattern, replacement in STATE_DICT_MAPPING.items():
        if replacement is None:
            if re.fullmatch(pattern, old_key):
                return None
        else:
            new_key, n_replace = re.subn(pattern, replacement, old_key)
            # Early exit of the loop
            if n_replace > 0:
                return new_key

    raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")


def convert_state_dict(original_state_dict: dict, config: DogeConfig):
    new_dict = {}

    for old_key, value in original_state_dict.items():
        new_key = map_old_key_to_new(old_key)
        if new_key is None:
            continue
        new_dict[new_key] = value
    return new_dict


def convert_doge_model(input_dir, output_dir):
    # Load and convert config
    with open(os.path.join(input_dir, "config.json")) as f:
        config = json.load(f)
    config = DogeConfig(**config)
    config.save_pretrained(output_dir)

    # Load and convert weights
    original_state_dict = load_weights(input_dir)
    new_dict = convert_state_dict(original_state_dict, config)
    with torch.device("meta"):
        model = DogeForCausalLM(config)
    if config.tie_word_embeddings:
        new_dict["lm_head.weight"] = new_dict["model.embed_tokens.weight"]
    model.load_state_dict(new_dict, strict=True, assign=True)
    model.save_pretrained(output_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "input_dir",
        type=str,
        help="Location of the local folder copied from the Hub.",
    )
    parser.add_argument(
        "output_dir",
        type=str,
        help="Location to write HF model.",
    )
    args = parser.parse_args()

    convert_doge_model(args.input_dir, args.output_dir)
transformers/src/transformers/models/doge/convert_doge_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/doge/convert_doge_weights_to_hf.py", "repo_id": "transformers", "token_count": 2100 }
467
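The heart of the conversion script above is the regex-driven key remapping: each checkpoint key is tried against the patterns in order, a `None` replacement drops the key, and `re.subn` rewrites it while the `(\d+)` capture group preserves the layer index. Here is a standalone sketch of that mechanism with a two-entry subset of the mapping:

```python
import re

# Two entries taken from the mapping in the script above; `None` means "drop".
MAPPING = {
    r"^model.layers.(\d+).pre_layernorm.weight": r"model.layers.\1.input_layernorm.weight",
    r"^model.layers.(\d+).feed_forward.router_gate.bias": None,
}

def map_key(old_key):
    for pattern, replacement in MAPPING.items():
        if replacement is None:
            # Dropped keys must match the pattern in full.
            if re.fullmatch(pattern, old_key):
                return None
        else:
            # subn rewrites the key and reports how many substitutions happened.
            new_key, n_replace = re.subn(pattern, replacement, old_key)
            if n_replace > 0:
                return new_key
    raise ValueError(f"{old_key} could not be mapped")

print(map_key("model.layers.7.pre_layernorm.weight"))
# model.layers.7.input_layernorm.weight
print(map_key("model.layers.7.feed_forward.router_gate.bias"))
# None
```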
# coding=utf-8
# Copyright 2010, DPR authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DPR model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DPRConfig(PretrainedConfig):
    r"""
    [`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*.

    This is the configuration class to store the configuration of a [`DPRContextEncoder`], [`DPRQuestionEncoder`], or
    a [`DPRReader`]. It is used to instantiate the components of the DPR model according to the specified arguments,
    defining the model component architectures. Instantiating a configuration with the defaults will yield a similar
    configuration to that of the DPRContextEncoder
    [facebook/dpr-ctx_encoder-single-nq-base](https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base)
    architecture.

    This class is a subclass of [`BertConfig`]. Please check the superclass for the documentation of all kwargs.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the DPR model. Defines the different tokens that can be represented by the *inputs_ids*
            passed to the forward method of [`BertModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`BertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
        projection_dim (`int`, *optional*, defaults to 0):
            Dimension of the projection for the context and question encoders. If it is set to zero (default), then no
            projection is done.

    Example:

    ```python
    >>> from transformers import DPRConfig, DPRContextEncoder

    >>> # Initializing a DPR facebook/dpr-ctx_encoder-single-nq-base style configuration
    >>> configuration = DPRConfig()

    >>> # Initializing a model (with random weights) from the facebook/dpr-ctx_encoder-single-nq-base style configuration
    >>> model = DPRContextEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type


__all__ = ["DPRConfig"]
transformers/src/transformers/models/dpr/configuration_dpr.py/0
{ "file_path": "transformers/src/transformers/models/dpr/configuration_dpr.py", "repo_id": "transformers", "token_count": 2344 }
468
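The one option in the config above without a usage example is `projection_dim`; a minimal sketch of what a non-zero value means, assuming the semantics documented in the docstring (a value of 128 is purely illustrative):

```python
from transformers import DPRConfig, DPRContextEncoder

# With the default projection_dim=0, the encoder returns the raw
# hidden_size-dim pooled output; a non-zero value adds a linear projection
# on top, so the embedding dimension follows projection_dim instead.
config = DPRConfig(projection_dim=128)
model = DPRContextEncoder(config)  # randomly initialized weights
```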
# coding=utf-8 # Copyright 2022 Intel Labs, OpenMMLab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch DPT (Dense Prediction Transformers) model. This implementation is heavily inspired by OpenMMLab's implementation, found here: https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/dpt_head.py. """ import collections.abc from dataclasses import dataclass from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging, torch_int from ...utils.backbone_utils import load_backbone from .configuration_dpt import DPTConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Base class for model's outputs that also contains intermediate activations that can be used at later stages. Useful in the context of Vision models.: """ ) class BaseModelOutputWithIntermediateActivations(ModelOutput): r""" last_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_activations (`tuple(torch.FloatTensor)`, *optional*): Intermediate activations that can be used to compute hidden states of the model at various layers. """ last_hidden_states: Optional[torch.FloatTensor] = None intermediate_activations: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Base class for model's outputs that also contains a pooling of the last hidden states as well as intermediate activations that can be used by the model at later stages. """ ) class BaseModelOutputWithPoolingAndIntermediateActivations(ModelOutput): r""" pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) after further processing through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns the classification token after processing through a linear layer and a tanh activation function. The linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. intermediate_activations (`tuple(torch.FloatTensor)`, *optional*): Intermediate activations that can be used to compute hidden states of the model at various layers. 
""" last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None intermediate_activations: Optional[tuple[torch.FloatTensor, ...]] = None class DPTViTHybridEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. """ def __init__(self, config, feature_size=None): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.backbone = load_backbone(config) feature_dim = self.backbone.channels[-1] if len(self.backbone.channels) != 3: raise ValueError(f"Expected backbone to have 3 output features, got {len(self.backbone.channels)}") self.residual_feature_map_index = [0, 1] # Always take the output of the first and second backbone stage if feature_size is None: feat_map_shape = config.backbone_featmap_shape feature_size = feat_map_shape[-2:] feature_dim = feat_map_shape[1] else: feature_size = ( feature_size if isinstance(feature_size, collections.abc.Iterable) else (feature_size, feature_size) ) feature_dim = self.backbone.channels[-1] self.image_size = image_size self.patch_size = patch_size[0] self.num_channels = num_channels self.projection = nn.Conv2d(feature_dim, hidden_size, kernel_size=1) self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = torch_int(len(posemb_grid) ** 0.5) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode="bilinear") posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False, return_dict: bool = False ) -> torch.Tensor: batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if not interpolate_pos_encoding: if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size[0]}*{self.image_size[1]})." 
) position_embeddings = self._resize_pos_embed( self.position_embeddings, height // self.patch_size, width // self.patch_size ) backbone_output = self.backbone(pixel_values) features = backbone_output.feature_maps[-1] # Retrieve also the intermediate activations to use them at later stages output_hidden_states = [backbone_output.feature_maps[index] for index in self.residual_feature_map_index] embeddings = self.projection(features).flatten(2).transpose(1, 2) cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token embeddings = embeddings + position_embeddings if not return_dict: return (embeddings, output_hidden_states) # Return hidden states and intermediate activations return BaseModelOutputWithIntermediateActivations( last_hidden_states=embeddings, intermediate_activations=output_hidden_states, ) class DPTViTEmbeddings(nn.Module): """ Construct the CLS token, position and patch embeddings. """ def __init__(self, config): super().__init__() self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.patch_embeddings = DPTViTPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def _resize_pos_embed(self, posemb, grid_size_height, grid_size_width, start_index=1): posemb_tok = posemb[:, :start_index] posemb_grid = posemb[0, start_index:] old_grid_size = torch_int(posemb_grid.size(0) ** 0.5) posemb_grid = posemb_grid.reshape(1, old_grid_size, old_grid_size, -1).permute(0, 3, 1, 2) posemb_grid = nn.functional.interpolate(posemb_grid, size=(grid_size_height, grid_size_width), mode="bilinear") posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, grid_size_height * grid_size_width, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward(self, pixel_values, return_dict=False): batch_size, num_channels, height, width = pixel_values.shape # possibly interpolate position encodings to handle varying image sizes patch_size = self.config.patch_size position_embeddings = self._resize_pos_embed( self.position_embeddings, height // patch_size, width // patch_size ) embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # add the [CLS] token to the embedded patch tokens cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add positional encoding to each token embeddings = embeddings + position_embeddings embeddings = self.dropout(embeddings) if not return_dict: return (embeddings,) return BaseModelOutputWithIntermediateActivations(last_hidden_states=embeddings) class DPTViTPatchEmbeddings(nn.Module): """ Image to Patch Embedding. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling # Normalize the attention scores to probabilities. attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) # Mask heads if we want to if attention_mask is not None: attn_weights = attn_weights * attention_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->DPT class DPTSelfAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->DPT class DPTViTSelfOutput(nn.Module): """ The residual connection is defined in DPTLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class DPTViTAttention(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.attention = DPTSelfAttention(config) self.output = DPTViTSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.vit.modeling_vit.ViTAttention.prune_heads def prune_heads(self, heads: set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) # Copied from transformers.models.vit.modeling_vit.ViTAttention.forward def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->DPT class DPTViTIntermediate(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->DPT class DPTViTOutput(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # copied from transformers.models.vit.modeling_vit.ViTLayer with ViTConfig->DPTConfig, ViTAttention->DPTViTAttention, ViTIntermediate->DPTViTIntermediate, ViTOutput->DPTViTOutput class DPTViTLayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: DPTConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 
self.attention = DPTViTAttention(config) self.intermediate = DPTViTIntermediate(config) self.output = DPTViTOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in ViT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # copied from transformers.models.vit.modeling_vit.ViTEncoder with ViTConfig -> DPTConfig, ViTLayer->DPTViTLayer class DPTViTEncoder(nn.Module): def __init__(self, config: DPTConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([DPTViTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class DPTReassembleStage(nn.Module): """ This class reassembles the hidden states of the backbone into image-like feature representations at various resolutions. This happens in 3 stages: 1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to `config.readout_type`. 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`. 3. Resizing the spatial dimensions (height, width). Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. 
""" def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() if config.is_hybrid: self._init_reassemble_dpt_hybrid(config) else: self._init_reassemble_dpt(config) self.neck_ignore_stages = config.neck_ignore_stages def _init_reassemble_dpt_hybrid(self, config): r""" " For DPT-Hybrid the first 2 reassemble layers are set to `nn.Identity()`, please check the official implementation: https://github.com/isl-org/DPT/blob/f43ef9e08d70a752195028a51be5e1aff227b913/dpt/vit.py#L438 for more details. """ for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): if i <= 1: self.layers.append(nn.Identity()) elif i > 1: self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type != "project": raise ValueError(f"Readout type {config.readout_type} is not supported for DPT-Hybrid.") # When using DPT-Hybrid the readout type is set to "project". The sanity check is done on the config file self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for i in range(len(config.neck_hidden_sizes)): if i <= 1: self.readout_projects.append(nn.Sequential(nn.Identity())) elif i > 1: self.readout_projects.append( nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) def _init_reassemble_dpt(self, config): for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors): self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor)) if config.readout_type == "project": self.readout_projects = nn.ModuleList() hidden_size = _get_backbone_hidden_size(config) for _ in range(len(config.neck_hidden_sizes)): self.readout_projects.append( nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act]) ) def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone. 
""" out = [] for i, hidden_state in enumerate(hidden_states): if i not in self.neck_ignore_stages: # reshape to (batch_size, num_channels, height, width) cls_token, hidden_state = hidden_state[:, 0], hidden_state[:, 1:] batch_size, sequence_length, num_channels = hidden_state.shape if patch_height is not None and patch_width is not None: hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) else: size = torch_int(sequence_length**0.5) hidden_state = hidden_state.reshape(batch_size, size, size, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() feature_shape = hidden_state.shape if self.config.readout_type == "project": # reshape to (batch_size, height*width, num_channels) hidden_state = hidden_state.flatten(2).permute((0, 2, 1)) readout = cls_token.unsqueeze(1).expand_as(hidden_state) # concatenate the readout token to the hidden states and project hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1)) # reshape back to (batch_size, num_channels, height, width) hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape) elif self.config.readout_type == "add": hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1) hidden_state = hidden_state.reshape(feature_shape) hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out def _get_backbone_hidden_size(config): if config.backbone_config is not None and config.is_hybrid is False: return config.backbone_config.hidden_size else: return config.hidden_size class DPTReassembleLayer(nn.Module): def __init__(self, config, channels, factor): super().__init__() # projection hidden_size = _get_backbone_hidden_size(config) self.projection = nn.Conv2d(in_channels=hidden_size, out_channels=channels, kernel_size=1) # up/down sampling depending on factor if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: # so should downsample self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=int(1 / factor), padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class DPTFeatureFusionStage(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList() for _ in range(len(config.neck_hidden_sizes)): self.layers.append(DPTFeatureFusionLayer(config)) def forward(self, hidden_states): # reversing the hidden_states, we start from the last hidden_states = hidden_states[::-1] fused_hidden_states = [] fused_hidden_state = None for hidden_state, layer in zip(hidden_states, self.layers): if fused_hidden_state is None: # first layer only uses the last hidden_state fused_hidden_state = layer(hidden_state) else: fused_hidden_state = layer(fused_hidden_state, hidden_state) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class DPTPreActResidualLayer(nn.Module): """ ResidualConvUnit, pre-activate residual unit. Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. 
""" def __init__(self, config): super().__init__() self.use_batch_norm = config.use_batch_norm_in_fusion_residual use_bias_in_fusion_residual = ( config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm ) self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual, ) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual, ) if self.use_batch_norm: self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size) self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) if self.use_batch_norm: hidden_state = self.batch_norm2(hidden_state) return hidden_state + residual class DPTFeatureFusionLayer(nn.Module): """Feature fusion layer, merges feature maps from different stages. Args: config (`[DPTConfig]`): Model configuration class defining the model architecture. align_corners (`bool`, *optional*, defaults to `True`): The align_corner setting for bilinear upsample. """ def __init__(self, config, align_corners=True): super().__init__() self.align_corners = align_corners self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = DPTPreActResidualLayer(config) self.residual_layer2 = DPTPreActResidualLayer(config) def forward(self, hidden_state, residual=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate( residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode="bilinear", align_corners=False ) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) hidden_state = nn.functional.interpolate( hidden_state, scale_factor=2, mode="bilinear", align_corners=self.align_corners ) hidden_state = self.projection(hidden_state) return hidden_state @auto_docstring class DPTPreTrainedModel(PreTrainedModel): config: DPTConfig base_model_prefix = "dpt" main_input_name = "pixel_values" supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, (DPTViTEmbeddings, DPTViTHybridEmbeddings)): module.cls_token.data.zero_() module.position_embeddings.data.zero_() @auto_docstring class DPTModel(DPTPreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ 
super().__init__(config) self.config = config # vit encoder if config.is_hybrid: self.embeddings = DPTViTHybridEmbeddings(config) else: self.embeddings = DPTViTEmbeddings(config) self.encoder = DPTViTEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.pooler = DPTViTPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): if self.config.is_hybrid: return self.embeddings else: return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndIntermediateActivations]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, return_dict=return_dict) embedding_last_hidden_states = embedding_output[0] if not return_dict else embedding_output.last_hidden_states encoder_outputs = self.encoder( embedding_last_hidden_states, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] + embedding_output[1:] return BaseModelOutputWithPoolingAndIntermediateActivations( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, intermediate_activations=embedding_output.intermediate_activations, ) # Copied from transformers.models.vit.modeling_vit.ViTPooler with ViT->DPT class DPTViTPooler(nn.Module): def __init__(self, config: DPTConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, config.pooler_output_size) self.activation = ACT2FN[config.pooler_act] def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class DPTNeck(nn.Module): """ DPTNeck. A neck is a module that is normally used between the backbone and the head. 
It takes a list of tensors as input and produces another list of tensors as output. For DPT, it includes 2 stages: * DPTReassembleStage * DPTFeatureFusionStage. Args: config (dict): config dict. """ def __init__(self, config): super().__init__() self.config = config # postprocessing: only required in case of a non-hierarchical backbone (e.g. ViT, BEiT) if config.backbone_config is not None and config.backbone_config.model_type in ["swinv2"]: self.reassemble_stage = None else: self.reassemble_stage = DPTReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) # fusion self.fusion_stage = DPTFeatureFusionStage(config) def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone. """ if not isinstance(hidden_states, (tuple, list)): raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states if self.reassemble_stage is not None: hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features) return output class DPTDepthEstimationHead(nn.Module): """ Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples the predictions to the input resolution after the first convolutional layer (details can be found in the paper's supplementary material). """ def __init__(self, config): super().__init__() self.config = config self.projection = None if config.add_projection: self.projection = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(), ) def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor: # use last features hidden_states = hidden_states[self.config.head_in_index] if self.projection is not None: hidden_states = self.projection(hidden_states) hidden_states = nn.ReLU()(hidden_states) predicted_depth = self.head(hidden_states) predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @auto_docstring( custom_intro=""" DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. 
""" ) class DPTForDepthEstimation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.backbone = None if config.is_hybrid is False and (config.backbone_config is not None or config.backbone is not None): self.backbone = load_backbone(config) else: self.dpt = DPTModel(config, add_pooling_layer=False) # Neck self.neck = DPTNeck(config) # Depth estimation head self.head = DPTDepthEstimationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth depth estimation maps for computing the loss. Examples: ```python >>> from transformers import AutoImageProcessor, DPTForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large") >>> model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = image_processor.post_process_depth_estimation( ... outputs, ... target_sizes=[(image.height, image.width)], ... ) >>> # visualize the prediction >>> predicted_depth = post_processed_output[0]["predicted_depth"] >>> depth = predicted_depth * 255 / predicted_depth.max() >>> depth = depth.detach().cpu().numpy() >>> depth = Image.fromarray(depth.astype("uint8")) ```""" loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions if self.backbone is not None: outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) hidden_states = outputs.feature_maps else: outputs = self.dpt( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] # only keep certain features based on config.backbone_out_indices # note that the hidden_states also include the initial embeddings if not self.config.is_hybrid: hidden_states = [ feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices ] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend( feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:] ) hidden_states = backbone_hidden_states patch_height, patch_width = None, None if self.config.backbone_config is not None and 
self.config.is_hybrid is False: _, _, height, width = pixel_values.shape patch_size = self.config.backbone_config.patch_size patch_height = height // patch_size patch_width = width // patch_size hidden_states = self.neck(hidden_states, patch_height, patch_width) predicted_depth = self.head(hidden_states) if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return ((loss,) + output) if loss is not None else output return DepthEstimatorOutput( loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) class DPTSemanticSegmentationHead(nn.Module): def __init__(self, config): super().__init__() self.config = config features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(config.semantic_classifier_dropout), nn.Conv2d(features, config.num_labels, kernel_size=1), nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), ) def forward(self, hidden_states: list[torch.Tensor]) -> torch.Tensor: # use last features hidden_states = hidden_states[self.config.head_in_index] logits = self.head(hidden_states) return logits class DPTAuxiliaryHead(nn.Module): def __init__(self, config): super().__init__() features = config.fusion_hidden_size self.head = nn.Sequential( nn.Conv2d(features, features, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(features), nn.ReLU(), nn.Dropout(0.1, False), nn.Conv2d(features, config.num_labels, kernel_size=1), ) def forward(self, hidden_states): logits = self.head(hidden_states) return logits @auto_docstring class DPTForSemanticSegmentation(DPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.dpt = DPTModel(config, add_pooling_layer=False) # Neck self.neck = DPTNeck(config) # Segmentation head(s) self.head = DPTSemanticSegmentationHead(config) self.auxiliary_head = DPTAuxiliaryHead(config) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SemanticSegmenterOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). 
Examples: ```python >>> from transformers import AutoImageProcessor, DPTForSemanticSegmentation >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade") >>> model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if labels is not None and self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one") outputs = self.dpt( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, # we need the intermediate hidden states return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] # only keep certain features based on config.backbone_out_indices # note that the hidden_states also include the initial embeddings if not self.config.is_hybrid: hidden_states = [ feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices ] else: backbone_hidden_states = outputs.intermediate_activations if return_dict else list(outputs[-1]) backbone_hidden_states.extend( feature for idx, feature in enumerate(hidden_states[1:]) if idx in self.config.backbone_out_indices[2:] ) hidden_states = backbone_hidden_states hidden_states = self.neck(hidden_states=hidden_states) logits = self.head(hidden_states) auxiliary_logits = None if self.auxiliary_head is not None: auxiliary_logits = self.auxiliary_head(hidden_states[-1]) loss = None if labels is not None: # upsample logits to the images' original size upsampled_logits = nn.functional.interpolate( logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) if auxiliary_logits is not None: upsampled_auxiliary_logits = nn.functional.interpolate( auxiliary_logits, size=labels.shape[-2:], mode="bilinear", align_corners=False ) # compute weighted loss loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) main_loss = loss_fct(upsampled_logits, labels) auxiliary_loss = loss_fct(upsampled_auxiliary_logits, labels) loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: output = (logits,) + outputs[1:] else: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = ["DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel"]
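The segmentation docstring above stops at the raw `logits`. As a hedged follow-up sketch (reusing the `Intel/dpt-large-ade` checkpoint from the example and plain `torch` ops; the variable names are illustrative, not part of `modeling_dpt.py`), the logits are typically upsampled to the input resolution and argmax-ed into a per-pixel label map, mirroring the `nn.functional.interpolate(..., mode="bilinear", align_corners=False)` call used in the loss branch of `DPTForSemanticSegmentation.forward`:

```python
# Hedged usage sketch, not part of modeling_dpt.py: turning DPTForSemanticSegmentation
# logits into a per-pixel label map.
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, DPTForSemanticSegmentation

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("Intel/dpt-large-ade")
model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, num_labels, h, w) at reduced resolution

# upsample to the original image size and take the most likely class per pixel
upsampled = torch.nn.functional.interpolate(
    logits, size=(image.height, image.width), mode="bilinear", align_corners=False
)
segmentation_map = upsampled.argmax(dim=1)[0]  # (image.height, image.width) tensor of label ids
```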
transformers/src/transformers/models/dpt/modeling_dpt.py/0
{ "file_path": "transformers/src/transformers/models/dpt/modeling_dpt.py", "repo_id": "transformers", "token_count": 23118 }
469
# coding=utf-8 # Copyright 2023 Meta Platforms, Inc. and affiliates, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch EnCodec model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from ...modeling_utils import PreTrainedAudioTokenizerBase from ...utils import ( ModelOutput, auto_docstring, logging, ) from .configuration_encodec import EncodecConfig logger = logging.get_logger(__name__) # General docstring @dataclass @auto_docstring class EncodecOutput(ModelOutput): r""" audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Encodec. """ audio_codes: Optional[torch.LongTensor] = None audio_values: Optional[torch.FloatTensor] = None @dataclass @auto_docstring class EncodecEncoderOutput(ModelOutput): r""" audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*): Scaling factor for each `audio_codes` input. This is used to unscale each chunk of audio when decoding. last_frame_pad_length (`int`, *optional*): The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the encoded frames. """ audio_codes: Optional[torch.LongTensor] = None audio_scales: Optional[torch.FloatTensor] = None last_frame_pad_length: Optional[int] = None @dataclass @auto_docstring class EncodecDecoderOutput(ModelOutput): r""" audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Encodec. """ audio_values: Optional[torch.FloatTensor] = None class EncodecConv1d(nn.Module): """Conv1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1 ): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) # warn user on unusual setup between dilation and stride if stride > 1 and dilation > 1: logger.warning( "EncodecConv1d has been initialized with stride > 1 and dilation > 1" f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})." 
) self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if self.norm_type == "weight_norm": self.conv = weight_norm(self.conv) elif self.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] # Effective kernel size with dilations. kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer("stride", stride, persistent=False) self.register_buffer("kernel_size", kernel_size, persistent=False) self.register_buffer("padding_total", kernel_size - stride, persistent=False) def _get_extra_padding_for_conv1d( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """See `pad_for_conv1d`.""" length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str = "zero", value: float = 0.0): """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens. """ length = hidden_states.shape[-1] padding_left, padding_right = paddings if mode != "reflect": return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def forward(self, hidden_states): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if self.causal: # Left padding for causal hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: # Asymmetric padding required for odd strides padding_right = self.padding_total // 2 padding_left = self.padding_total - padding_right hidden_states = self._pad1d( hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode ) hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) return hidden_states class EncodecConvTranspose1d(nn.Module): """ConvTranspose1d with asymmetric or causal padding and normalization.""" def __init__(self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.norm_type = config.norm_type if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if config.norm_type == "weight_norm": self.conv = weight_norm(self.conv) elif config.norm_type == "time_group_norm": self.norm = nn.GroupNorm(1, out_channels) if 
not (self.causal or self.trim_right_ratio == 1.0): raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions") def forward(self, hidden_states): kernel_size = self.conv.kernel_size[0] stride = self.conv.stride[0] padding_total = kernel_size - stride hidden_states = self.conv(hidden_states) if self.norm_type == "time_group_norm": hidden_states = self.norm(hidden_states) # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be # removed at the very end, when keeping only the right length for the output, # as removing it here would require also passing the length at the matching layer # in the encoder. if self.causal: # Trim the padding on the right according to the specified ratio # if trim_right_ratio = 1.0, trim everything from right padding_right = math.ceil(padding_total * self.trim_right_ratio) else: # Asymmetric padding required for odd strides padding_right = padding_total // 2 padding_left = padding_total - padding_right # unpad end = hidden_states.shape[-1] - padding_right hidden_states = hidden_states[..., padding_left:end] return hidden_states class EncodecLSTM(nn.Module): """ LSTM without worrying about the hidden state, nor the layout of the data. Expects input as convolutional layout. """ def __init__(self, config: EncodecConfig, dimension: int): super().__init__() self.lstm = nn.LSTM(dimension, dimension, config.num_lstm_layers) def forward(self, hidden_states): hidden_states = hidden_states.permute(2, 0, 1) hidden_states = self.lstm(hidden_states)[0] + hidden_states hidden_states = hidden_states.permute(1, 2, 0) return hidden_states class EncodecResnetBlock(nn.Module): """ Residual block from SEANet model as used by EnCodec. """ def __init__(self, config: EncodecConfig, dim: int, dilations: list[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError("Number of kernel sizes should match number of dilations") hidden = dim // config.compress block = [] for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states): residual = hidden_states for layer in self.block: hidden_states = layer(hidden_states) return self.shortcut(residual) + hidden_states class EncodecEncoder(nn.Module): """SEANet encoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() model = [EncodecConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 # Downsample to raw audio scale for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])] # Add downsampling layers model += [nn.ELU()] model += [EncodecConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [EncodecLSTM(config, scaling * config.num_filters)] model += [nn.ELU()] model += [EncodecConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, 
hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecDecoder(nn.Module): """SEANet decoder as used by EnCodec.""" def __init__(self, config: EncodecConfig): super().__init__() scaling = int(2 ** len(config.upsampling_ratios)) model = [EncodecConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)] model += [EncodecLSTM(config, scaling * config.num_filters)] # Upsample to raw audio scale for ratio in config.upsampling_ratios: current_scale = scaling * config.num_filters # Add upsampling layers model += [nn.ELU()] model += [ EncodecConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio) ] # Add residual layers for j in range(config.num_residual_layers): model += [EncodecResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))] scaling //= 2 # Add final layers model += [nn.ELU()] model += [EncodecConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)] self.layers = nn.ModuleList(model) def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class EncodecEuclideanCodebook(nn.Module): """Codebook with Euclidean distance.""" def __init__(self, config: EncodecConfig): super().__init__() embed = torch.zeros(config.codebook_size, config.codebook_dim) self.codebook_size = config.codebook_size self.register_buffer("inited", torch.Tensor([True])) self.register_buffer("cluster_size", torch.zeros(config.codebook_size)) self.register_buffer("embed", embed) self.register_buffer("embed_avg", embed.clone()) def quantize(self, hidden_states): embed = self.embed.t() scaled_states = hidden_states.pow(2).sum(1, keepdim=True) dist = -(scaled_states - 2 * hidden_states @ embed + embed.pow(2).sum(0, keepdim=True)) embed_ind = dist.max(dim=-1).indices return embed_ind def encode(self, hidden_states): shape = hidden_states.shape # pre-process hidden_states = hidden_states.reshape((-1, shape[-1])) # quantize embed_ind = self.quantize(hidden_states) # post-process embed_ind = embed_ind.view(*shape[:-1]) return embed_ind def decode(self, embed_ind): quantize = nn.functional.embedding(embed_ind, self.embed) return quantize class EncodecVectorQuantization(nn.Module): """ Vector quantization implementation. Currently supports only euclidean distance. 
""" def __init__(self, config: EncodecConfig): super().__init__() self.codebook = EncodecEuclideanCodebook(config) def encode(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1) embed_in = self.codebook.encode(hidden_states) return embed_in def decode(self, embed_ind): quantize = self.codebook.decode(embed_ind) quantize = quantize.permute(0, 2, 1) return quantize class EncodecResidualVectorQuantizer(nn.Module): """Residual Vector Quantizer.""" def __init__(self, config: EncodecConfig): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.num_quantizers = config.num_quantizers self.layers = nn.ModuleList([EncodecVectorQuantization(config) for _ in range(config.num_quantizers)]) def get_num_quantizers_for_bandwidth(self, bandwidth: Optional[float] = None) -> int: """Return num_quantizers based on specified target bandwidth.""" bw_per_q = math.log2(self.codebook_size) * self.frame_rate num_quantizers = self.num_quantizers if bandwidth is not None and bandwidth > 0.0: num_quantizers = int(max(1, math.floor(bandwidth * 1000 / bw_per_q))) return num_quantizers def encode(self, embeddings: torch.Tensor, bandwidth: Optional[float] = None) -> torch.Tensor: """ Encode a given input tensor with the specified frame rate at the given bandwidth. The RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer. """ num_quantizers = self.get_num_quantizers_for_bandwidth(bandwidth) residual = embeddings all_indices = [] for layer in self.layers[:num_quantizers]: indices = layer.encode(residual) quantized = layer.decode(indices) residual = residual - quantized all_indices.append(indices) out_indices = torch.stack(all_indices) return out_indices def decode(self, codes: torch.Tensor) -> torch.Tensor: """Decode the given codes to the quantized representation.""" quantized_out = torch.tensor(0.0, device=codes.device) for i, indices in enumerate(codes): layer = self.layers[i] quantized = layer.decode(indices) quantized_out = quantized_out + quantized return quantized_out @auto_docstring class EncodecPreTrainedModel(PreTrainedAudioTokenizerBase): config: EncodecConfig base_model_prefix = "encodec" main_input_name = "input_values" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.GroupNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, nn.ConvTranspose1d): module.reset_parameters() elif isinstance(module, nn.LSTM): for name, param in module.named_parameters(): if "weight" in name: nn.init.xavier_uniform_(param) elif "bias" in name: nn.init.constant_(param, 0.0) @auto_docstring( custom_intro=""" The EnCodec neural audio codec model. 
""" ) class EncodecModel(EncodecPreTrainedModel): def __init__(self, config: EncodecConfig): super().__init__(config) self.config = config self.encoder = EncodecEncoder(config) self.decoder = EncodecDecoder(config) self.quantizer = EncodecResidualVectorQuantizer(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2**self.bits_per_codebook != self.config.codebook_size: raise ValueError("The codebook_size must be a power of 2.") # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _encode_frame( self, input_values: torch.Tensor, bandwidth: float ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """ Encodes the given input using the underlying VQVAE. If `config.normalize` is set to `True` the input is first normalized. The padding mask is required to compute the correct scale. """ length = input_values.shape[-1] duration = length / self.config.sampling_rate if self.config.chunk_length_s is not None and duration > 1e-5 + self.config.chunk_length_s: raise RuntimeError(f"Duration of frame ({duration}) is longer than chunk {self.config.chunk_length_s}") scale = None if self.config.normalize: mono = torch.sum(input_values, 1, keepdim=True) / input_values.shape[1] scale = mono.pow(2).mean(dim=-1, keepdim=True).sqrt() + 1e-8 input_values = input_values / scale scale = scale.view(-1, 1) embeddings = self.encoder(input_values) codes = self.quantizer.encode(embeddings, bandwidth) codes = codes.transpose(0, 1) return codes, scale def encode( self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, bandwidth: Optional[float] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, Optional[torch.Tensor], int], EncodecEncoderOutput]: """ Encodes the input audio waveform into discrete codes of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`. - `nb_frames=1` if `self.config.chunk_length=None` (as the encoder is applied on the full audio), which is the case for the 24kHz model. Otherwise, `nb_frames=ceil(input_length/self.config.chunk_stride)`, which is the case for the 48kHz model. - `frame_len` is the length of each frame, which is equal to `ceil(input_length/self.config.hop_length)` if `self.config.chunk_length=None` (e.g., for the 24kHz model). Otherwise, if `self.config.chunk_length` is defined, `frame_len=self.config.chunk_length/self.config.hop_length`, e.g., the case for the 48kHz model with `frame_len=150`. Args: input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Float values of the input audio waveform. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Padding mask used to pad the `input_values`. bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as bandwidth == 6.0 Returns: EncodecEncoderOutput dict or a tuple containing: - audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*), - audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*), - last_frame_pad_length (`int`, *optional*). 
""" return_dict = return_dict if return_dict is not None else self.config.return_dict if bandwidth is None: bandwidth = self.config.target_bandwidths[0] if bandwidth not in self.config.target_bandwidths: raise ValueError( f"This model doesn't support the bandwidth {bandwidth}. Select one of {self.config.target_bandwidths}." ) _, channels, input_length = input_values.shape if channels < 1 or channels > 2: raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}") chunk_length = self.config.chunk_length if chunk_length is None: chunk_length = input_length stride = input_length else: stride = self.config.chunk_stride if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() else: padding_mask = padding_mask.view(padding_mask.shape[0], -1, padding_mask.shape[-1]) encoded_frames = [] scales = [] for offset in range(0, input_length, stride): mask = padding_mask[..., offset : offset + chunk_length].bool() frame = mask * input_values[..., offset : offset + chunk_length] encoded_frame, scale = self._encode_frame(frame, bandwidth) encoded_frames.append(encoded_frame) scales.append(scale) # pad last frame (if necessary) to be able to apply `torch.stack` last_frame_pad_length = encoded_frames[0].shape[-1] - encoded_frames[-1].shape[-1] if last_frame_pad_length > 0: last_frame = nn.functional.pad(encoded_frames[-1], (0, last_frame_pad_length), value=0) encoded_frames[-1] = last_frame encoded_frames = torch.stack(encoded_frames) if not return_dict: return (encoded_frames, scales, last_frame_pad_length) return EncodecEncoderOutput(encoded_frames, scales, last_frame_pad_length) @staticmethod def _linear_overlap_add(frames: list[torch.Tensor], stride: int): # Generic overlap add, with linear fade-in/fade-out, supporting complex scenario # e.g., more than 2 frames per position. # The core idea is to use a weight function that is a triangle, # with a maximum value at the middle of the chunk. # We use this weighting when summing the frames, and divide by the sum of weights # for each positions at the end. Thus: # - if a frame is the only one to cover a position, the weighting is a no-op. # - if 2 frames cover a position: # ... ... # / \/ \ # / /\ \ # S T , i.e. S offset of second frame starts, T end of first frame. # Then the weight function for each one is: (t - S), (T - t), with `t` a given offset. # After the final normalization, the weight of the second frame at position `t` is # (t - S) / (t - S + (T - t)) = (t - S) / (T - S), which is exactly what we want. # # - if more than 2 frames overlap at a given point, we hope that by induction # something sensible happens. 
if len(frames) == 0: raise ValueError("`frames` cannot be an empty list.") device = frames[0].device dtype = frames[0].dtype shape = frames[0].shape[:-1] total_size = stride * (len(frames) - 1) + frames[-1].shape[-1] frame_length = frames[0].shape[-1] time_vec = torch.linspace(0, 1, frame_length + 2, device=device, dtype=dtype)[1:-1] weight = 0.5 - (time_vec - 0.5).abs() sum_weight = torch.zeros(total_size, device=device, dtype=dtype) out = torch.zeros(*shape, total_size, device=device, dtype=dtype) offset: int = 0 for frame in frames: frame_length = frame.shape[-1] out[..., offset : offset + frame_length] += weight[:frame_length] * frame sum_weight[offset : offset + frame_length] += weight[:frame_length] offset += stride if sum_weight.min() == 0: raise ValueError(f"`sum_weight` minimum element must be bigger than zero: {sum_weight}`") return out / sum_weight def _decode_frame(self, codes: torch.Tensor, scale: Optional[torch.Tensor] = None) -> torch.Tensor: codes = codes.transpose(0, 1) embeddings = self.quantizer.decode(codes) outputs = self.decoder(embeddings) if scale is not None: outputs = outputs * scale.view(-1, 1, 1) return outputs def decode( self, audio_codes: torch.LongTensor, audio_scales: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, last_frame_pad_length: Optional[int] = 0, ) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecDecoderOutput]: """ Decodes the given frames into an output audio waveform. Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be trimmed. Args: audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*): Scaling factor for each `audio_codes` input. padding_mask (`torch.Tensor` of shape `(channels, sequence_length)`): Padding mask used to pad the `input_values`. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. last_frame_pad_length (`int`, *optional*): Integer representing the length of the padding in the last frame, which is removed during decoding. 
""" return_dict = return_dict if return_dict is not None else self.config.return_dict chunk_length = self.config.chunk_length if chunk_length is None: if len(audio_codes) != 1: raise ValueError(f"Expected one frame, got {len(audio_codes)}") frame = audio_codes[0] if last_frame_pad_length > 0: frame = frame[..., :-last_frame_pad_length] audio_values = self._decode_frame(frame, audio_scales[0]) else: decoded_frames = [] for i, (frame, scale) in enumerate(zip(audio_codes, audio_scales)): if i == len(audio_codes) - 1 and last_frame_pad_length > 0: frame = frame[..., :-last_frame_pad_length] frames = self._decode_frame(frame, scale) decoded_frames.append(frames) audio_values = self._linear_overlap_add(decoded_frames, self.config.chunk_stride or 1) # truncate based on padding mask if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., : padding_mask.shape[-1]] if not return_dict: return (audio_values,) return EncodecDecoderOutput(audio_values) @auto_docstring def forward( self, input_values: torch.FloatTensor, padding_mask: Optional[torch.BoolTensor] = None, bandwidth: Optional[float] = None, audio_codes: Optional[torch.LongTensor] = None, audio_scales: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None, last_frame_pad_length: Optional[int] = 0, ) -> Union[tuple[torch.Tensor, torch.Tensor], EncodecOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*): Raw audio input converted to Float and padded to the appropriate length in order to be encoded using chunks of length self.chunk_length and a stride of `config.chunk_stride`. padding_mask (`torch.BoolTensor` of shape `(batch_size, channels, sequence_length)`, *optional*): Mask to avoid computing scaling factors on padding token indices (can we avoid computing conv on these+). Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. <Tip warning={true}> `padding_mask` should always be passed, unless the input was truncated or not padded. This is because in order to process tensors effectively, the input audio should be padded so that `input_length % stride = step` with `step = chunk_length-stride`. This ensures that all chunks are of the same shape </Tip> bandwidth (`float`, *optional*): The target bandwidth. Must be one of `config.target_bandwidths`. If `None`, uses the smallest possible bandwidth. bandwidth is represented as a thousandth of what it is, e.g. 6kbps bandwidth is represented as `bandwidth == 6.0` audio_codes (`torch.LongTensor` of shape `(nb_frames, batch_size, nb_quantizers, frame_len)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_scales (list of length `nb_frames` of `torch.Tensor` of shape `(batch_size, 1)`, *optional*): Scaling factor for each `audio_codes` input. return_dict (`bool`, *optional*): Whether to return outputs as a dict. last_frame_pad_length (`int`, *optional*): The length of the padding in the last frame, if any. This is used to ensure that the encoded frames can be outputted as a tensor. This value should be passed during decoding to ensure padding is removed from the encoded frames. 
Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, EncodecModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model_id = "facebook/encodec_24khz" >>> model = EncodecModel.from_pretrained(model_id) >>> processor = AutoProcessor.from_pretrained(model_id) >>> inputs = processor(raw_audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_codes = outputs.audio_codes >>> audio_values = outputs.audio_values ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() else: # ensure that channel dimension is present padding_mask = padding_mask.view(padding_mask.shape[0], -1, padding_mask.shape[-1]) if audio_codes is not None and audio_scales is None: raise ValueError("You specified `audio_codes` but did not specify the `audio_scales`") if audio_scales is not None and audio_codes is None: raise ValueError("You specified `audio_scales` but did not specify the `audio_codes`") if audio_scales is None and audio_codes is None: audio_codes, audio_scales, last_frame_pad_length = self.encode( input_values, padding_mask, bandwidth, False ) audio_values = self.decode( audio_codes, audio_scales, padding_mask, return_dict=return_dict, last_frame_pad_length=last_frame_pad_length, )[0] if not return_dict: return (audio_codes, audio_values) return EncodecOutput(audio_codes=audio_codes, audio_values=audio_values) __all__ = ["EncodecModel", "EncodecPreTrainedModel"]
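As a quick sanity check on `EncodecResidualVectorQuantizer.get_num_quantizers_for_bandwidth`, here is a hedged, self-contained sketch of the same arithmetic. The `codebook_size = 1024` and `frame_rate = 75` values are assumptions matching the figures commonly quoted for the 24 kHz EnCodec checkpoint, not values read from this file; substitute your own config values if they differ:

```python
# Hedged illustration of the bandwidth -> number-of-quantizers formula
# (not part of modeling_encodec.py).
import math

codebook_size = 1024   # bits per codebook index = log2(1024) = 10
frame_rate = 75        # quantized frames per second (assumed 24 kHz setup)

bits_per_quantizer_per_second = math.log2(codebook_size) * frame_rate  # 750 bits/s

for bandwidth_kbps in (1.5, 3.0, 6.0, 12.0, 24.0):
    num_quantizers = int(max(1, math.floor(bandwidth_kbps * 1000 / bits_per_quantizer_per_second)))
    print(bandwidth_kbps, num_quantizers)  # 1.5 -> 2, 3.0 -> 4, 6.0 -> 8, 12.0 -> 16, 24.0 -> 32
```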
transformers/src/transformers/models/encodec/modeling_encodec.py/0
{ "file_path": "transformers/src/transformers/models/encodec/modeling_encodec.py", "repo_id": "transformers", "token_count": 14457 }
470
# Copyright 2021 AlQuraishi Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import math from collections.abc import Iterable, Sequence from functools import partial from typing import Any, Callable, Optional, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> list[tuple[int, ...]]: shapes = [] if isinstance(tree, dict): for v in tree.values(): shapes.extend(_fetch_dims(v)) elif isinstance(tree, (list, tuple)): for t in tree: shapes.extend(_fetch_dims(t)) elif isinstance(tree, torch.Tensor): shapes.append(tree.shape) else: raise TypeError("Not supported") return shapes @torch.jit.ignore def _flat_idx_to_idx(flat_idx: int, dims: tuple[int, ...]) -> tuple[int, ...]: idx = [] for d in reversed(dims): idx.append(flat_idx % d) flat_idx = flat_idx // d return tuple(reversed(idx)) @torch.jit.ignore def _get_minimal_slice_set( start: Sequence[int], end: Sequence[int], dims: Sequence[int], start_edges: Optional[Sequence[bool]] = None, end_edges: Optional[Sequence[bool]] = None, ) -> list[tuple[slice, ...]]: """ Produces an ordered sequence of tensor slices that, when used in sequence on a tensor with shape dims, yields tensors that contain every leaf in the contiguous range [start, end]. Care is taken to yield a short sequence of slices, and perhaps even the shortest possible (I'm pretty sure it's the latter). end is INCLUSIVE. """ # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(l: list[bool]) -> None: tally = True for i in range(len(l)): reversed_idx = -1 * (i + 1) l[reversed_idx] &= tally tally = l[reversed_idx] if start_edges is None: start_edges = [s == 0 for s in start] reduce_edge_list(start_edges) if end_edges is None: end_edges = [e == (d - 1) for e, d in zip(end, dims)] reduce_edge_list(end_edges) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(start) == 0: return [()] elif len(start) == 1: return [(slice(start[0], end[0] + 1),)] slices: list[tuple[slice, ...]] = [] path_list: list[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(start, end): if s == e: path_list.append(slice(s, s + 1)) else: break path: tuple[slice, ...] 
= tuple(path_list) divergence_idx = len(path) # start == end, and we're done if divergence_idx == len(dims): return [path] def upper() -> tuple[tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None sdi = start[divergence_idx] return tuple( path + (slice(sdi, sdi + 1),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) ) def lower() -> tuple[tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None edi = end[divergence_idx] return tuple( path + (slice(edi, edi + 1),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),)) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),)) slices.extend(lower()) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper()) slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),)) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper()) middle_ground = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),)) slices.extend(lower()) return slices @torch.jit.ignore def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor: """ Equivalent to t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end] but without the need for the initial reshape call, which can be memory-intensive in certain situations. The only reshape operations in this function are performed on sub-tensors that scale with (flat_end - flat_start), the chunk size. """ batch_dims = t.shape[:no_batch_dims] start_idx = list(_flat_idx_to_idx(flat_start, batch_dims)) # _get_minimal_slice_set is inclusive end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims)) # Get an ordered list of slices to perform slices = _get_minimal_slice_set( start_idx, end_idx, batch_dims, ) sliced_tensors = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors]) def chunk_layer( layer: Callable, inputs: dict[str, Any], chunk_size: int, no_batch_dims: int, low_mem: bool = False, _out: Any = None, _add_into_out: bool = False, ) -> Any: """ Implements the "chunking" procedure described in section 1.11.8. Layer outputs and inputs are assumed to be simple "pytrees," consisting only of (arbitrarily nested) lists, tuples, and dicts with torch.Tensor leaves. Args: layer: The layer to be applied chunk-wise inputs: A (non-nested) dictionary of keyworded inputs. All leaves must be tensors and must share the same batch dimensions. 
chunk_size: The number of sub-batches per chunk. If multiple batch dimensions are specified, a "sub-batch" is defined as a single indexing of all batch dimensions simultaneously (s.t. the number of sub-batches is the product of the batch dimensions). no_batch_dims: How many of the initial dimensions of each input tensor can be considered batch dimensions. low_mem: Avoids flattening potentially large input tensors. Unnecessary in most cases, and is ever so slightly slower than the default setting. Returns: The reassembled output of the layer on the inputs. """ if not (len(inputs) > 0): raise ValueError("Must provide at least one input") initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)] orig_batch_dims = tuple(max(s) for s in zip(*initial_dims)) def _prep_inputs(t: torch.Tensor) -> torch.Tensor: if not low_mem: if sum(t.shape[:no_batch_dims]) != no_batch_dims: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) t = t.reshape(-1, *t.shape[no_batch_dims:]) else: t = t.expand(orig_batch_dims + t.shape[no_batch_dims:]) return t prepped_inputs: dict[str, Any] = tensor_tree_map(_prep_inputs, inputs) prepped_outputs = None if _out is not None: prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out) flat_batch_dim = 1 for d in orig_batch_dims: flat_batch_dim *= d no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(t: torch.Tensor) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t i = 0 out = prepped_outputs for _ in range(no_chunks): # Chunk the input if not low_mem: select_chunk = _select_chunk else: select_chunk = partial( _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), ) chunks: dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs) # Run the layer on the chunk output_chunk = layer(**chunks) # Allocate space for the output if out is None: out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk) # Put the chunk in its pre-allocated space if isinstance(output_chunk, dict): def assign(d1: dict, d2: dict) -> None: for k, v in d1.items(): if isinstance(v, dict): assign(v, d2[k]) else: if _add_into_out: v[i : i + chunk_size] += d2[k] else: v[i : i + chunk_size] = d2[k] assign(out, output_chunk) elif isinstance(output_chunk, tuple): for x1, x2 in zip(out, output_chunk): if _add_into_out: x1[i : i + chunk_size] += x2 else: x1[i : i + chunk_size] = x2 elif isinstance(output_chunk, torch.Tensor): if _add_into_out: out[i : i + chunk_size] += output_chunk else: out[i : i + chunk_size] = output_chunk else: raise TypeError("Not supported") i += chunk_size out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out) return out class ChunkSizeTuner: def __init__( self, # Heuristically, runtimes for most of the modules in the network # plateau earlier than this on all GPUs I've run the model on. 
max_chunk_size: int = 512, ): self.max_chunk_size = max_chunk_size self.cached_chunk_size: Optional[int] = None self.cached_arg_data: Optional[tuple] = None def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int: logging.info("Tuning chunk size...") if min_chunk_size >= self.max_chunk_size: return min_chunk_size candidates: list[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)] candidates = [c for c in candidates if c > min_chunk_size] candidates = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(chunk_size: int) -> bool: try: with torch.no_grad(): fn(*args, chunk_size=chunk_size) return True except RuntimeError: return False min_viable_chunk_size_index = 0 i = len(candidates) - 1 while i > min_viable_chunk_size_index: viable = test_chunk_size(candidates[i]) if not viable: i = (min_viable_chunk_size_index + i) // 2 else: min_viable_chunk_size_index = i i = (i + len(candidates) - 1) // 2 return candidates[min_viable_chunk_size_index] def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool: consistent = True for a1, a2 in zip(ac1, ac2): assert type(ac1) is type(ac2) if isinstance(ac1, (list, tuple)): consistent &= self._compare_arg_caches(a1, a2) elif isinstance(ac1, dict): a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])] a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])] consistent &= self._compare_arg_caches(a1_items, a2_items) else: consistent &= a1 == a2 return consistent def tune_chunk_size( self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int: consistent = True arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data) == len(arg_data) consistent = self._compare_arg_caches(self.cached_arg_data, arg_data) else: # Otherwise, we can reuse the precomputed value consistent = False if not consistent: self.cached_chunk_size = self._determine_favorable_chunk_size( representative_fn, args, min_chunk_size, ) self.cached_arg_data = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
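A minimal, hedged usage sketch of `chunk_layer` follows. The import path is inferred from the file location above, and the toy `scale` callable plus the tensor shapes are illustrative assumptions rather than anything documented by the library:

```python
# Hedged usage sketch for chunk_layer; the toy layer and shapes are illustrative assumptions.
import torch
from transformers.models.esm.openfold_utils.chunk_utils import chunk_layer

def scale(x: torch.Tensor) -> torch.Tensor:
    # stand-in "layer": any callable accepting the keyworded inputs works
    return 2.0 * x

inputs = {"x": torch.randn(4, 8, 16)}  # first two dims treated as batch dims

out = chunk_layer(
    scale,
    inputs,
    chunk_size=5,      # process 5 of the 4 * 8 = 32 flattened sub-batches at a time
    no_batch_dims=2,
)
assert out.shape == (4, 8, 16)
assert torch.allclose(out, 2.0 * inputs["x"])
```

With `no_batch_dims=2`, the leading `4 x 8` dimensions are flattened into 32 sub-batches that are run through the layer `chunk_size` at a time and then reassembled into the original batch shape.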
transformers/src/transformers/models/esm/openfold_utils/chunk_utils.py/0
{ "file_path": "transformers/src/transformers/models/esm/openfold_utils/chunk_utils.py", "repo_id": "transformers", "token_count": 6577 }
471
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FastSpeech2Conformer model configuration""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FastSpeech2ConformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FastSpeech2ConformerModel`]. It is used to instantiate a FastSpeech2Conformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FastSpeech2Conformer [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 384): The dimensionality of the hidden layers. vocab_size (`int`, *optional*, defaults to 78): The size of the vocabulary. num_mel_bins (`int`, *optional*, defaults to 80): The number of mel filters used in the filter bank. encoder_num_attention_heads (`int`, *optional*, defaults to 2): The number of attention heads in the encoder. encoder_layers (`int`, *optional*, defaults to 4): The number of layers in the encoder. encoder_linear_units (`int`, *optional*, defaults to 1536): The number of units in the linear layer of the encoder. decoder_layers (`int`, *optional*, defaults to 4): The number of layers in the decoder. decoder_num_attention_heads (`int`, *optional*, defaults to 2): The number of attention heads in the decoder. decoder_linear_units (`int`, *optional*, defaults to 1536): The number of units in the linear layer of the decoder. speech_decoder_postnet_layers (`int`, *optional*, defaults to 5): The number of layers in the post-net of the speech decoder. speech_decoder_postnet_units (`int`, *optional*, defaults to 256): The number of units in the post-net layers of the speech decoder. speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5): The kernel size in the post-net of the speech decoder. positionwise_conv_kernel_size (`int`, *optional*, defaults to 3): The size of the convolution kernel used in the position-wise layer. encoder_normalize_before (`bool`, *optional*, defaults to `False`): Specifies whether to normalize before encoder layers. decoder_normalize_before (`bool`, *optional*, defaults to `False`): Specifies whether to normalize before decoder layers. encoder_concat_after (`bool`, *optional*, defaults to `False`): Specifies whether to concatenate after encoder layers. decoder_concat_after (`bool`, *optional*, defaults to `False`): Specifies whether to concatenate after decoder layers. reduction_factor (`int`, *optional*, defaults to 1): The factor by which the speech frame rate is reduced. 
speaking_speed (`float`, *optional*, defaults to 1.0): The speed of the speech produced. use_macaron_style_in_conformer (`bool`, *optional*, defaults to `True`): Specifies whether to use macaron style in the conformer. use_cnn_in_conformer (`bool`, *optional*, defaults to `True`): Specifies whether to use convolutional neural networks in the conformer. encoder_kernel_size (`int`, *optional*, defaults to 7): The kernel size used in the encoder. decoder_kernel_size (`int`, *optional*, defaults to 31): The kernel size used in the decoder. duration_predictor_layers (`int`, *optional*, defaults to 2): The number of layers in the duration predictor. duration_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the duration predictor. duration_predictor_kernel_size (`int`, *optional*, defaults to 3): The kernel size used in the duration predictor. energy_predictor_layers (`int`, *optional*, defaults to 2): The number of layers in the energy predictor. energy_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the energy predictor. energy_predictor_kernel_size (`int`, *optional*, defaults to 3): The kernel size used in the energy predictor. energy_predictor_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the energy predictor. energy_embed_kernel_size (`int`, *optional*, defaults to 1): The kernel size used in the energy embed layer. energy_embed_dropout (`float`, *optional*, defaults to 0.0): The dropout rate in the energy embed layer. stop_gradient_from_energy_predictor (`bool`, *optional*, defaults to `False`): Specifies whether to stop gradients from the energy predictor. pitch_predictor_layers (`int`, *optional*, defaults to 5): The number of layers in the pitch predictor. pitch_predictor_channels (`int`, *optional*, defaults to 256): The number of channels in the pitch predictor. pitch_predictor_kernel_size (`int`, *optional*, defaults to 5): The kernel size used in the pitch predictor. pitch_predictor_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the pitch predictor. pitch_embed_kernel_size (`int`, *optional*, defaults to 1): The kernel size used in the pitch embed layer. pitch_embed_dropout (`float`, *optional*, defaults to 0.0): The dropout rate in the pitch embed layer. stop_gradient_from_pitch_predictor (`bool`, *optional*, defaults to `True`): Specifies whether to stop gradients from the pitch predictor. encoder_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the encoder. encoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2): The positional dropout rate in the encoder. encoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2): The attention dropout rate in the encoder. decoder_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the decoder. decoder_positional_dropout_rate (`float`, *optional*, defaults to 0.2): The positional dropout rate in the decoder. decoder_attention_dropout_rate (`float`, *optional*, defaults to 0.2): The attention dropout rate in the decoder. duration_predictor_dropout_rate (`float`, *optional*, defaults to 0.2): The dropout rate in the duration predictor. speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5): The dropout rate in the speech decoder postnet. max_source_positions (`int`, *optional*, defaults to 5000): if `"relative"` position embeddings are used, defines the maximum source input positions. 
use_masking (`bool`, *optional*, defaults to `True`): Specifies whether to use masking in the model. use_weighted_masking (`bool`, *optional*, defaults to `False`): Specifies whether to use weighted masking in the model. num_speakers (`int`, *optional*): Number of speakers. If set to > 1, assume that the speaker ids will be provided as the input and use speaker id embedding layer. num_languages (`int`, *optional*): Number of languages. If set to > 1, assume that the language ids will be provided as the input and use the language id embedding layer. speaker_embed_dim (`int`, *optional*): Speaker embedding dimension. If set to > 0, assume that speaker_embedding will be provided as the input. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Specifies whether the model is an encoder-decoder. Example: ```python >>> from transformers import FastSpeech2ConformerModel, FastSpeech2ConformerConfig >>> # Initializing a FastSpeech2Conformer style configuration >>> configuration = FastSpeech2ConformerConfig() >>> # Initializing a model from the FastSpeech2Conformer style configuration >>> model = FastSpeech2ConformerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "fastspeech2_conformer" base_config_key = "model_config" attribute_map = {"num_hidden_layers": "encoder_layers", "num_attention_heads": "encoder_num_attention_heads"} def __init__( self, hidden_size=384, vocab_size=78, num_mel_bins=80, encoder_num_attention_heads=2, encoder_layers=4, encoder_linear_units=1536, decoder_layers=4, decoder_num_attention_heads=2, decoder_linear_units=1536, speech_decoder_postnet_layers=5, speech_decoder_postnet_units=256, speech_decoder_postnet_kernel=5, positionwise_conv_kernel_size=3, encoder_normalize_before=False, decoder_normalize_before=False, encoder_concat_after=False, decoder_concat_after=False, reduction_factor=1, speaking_speed=1.0, use_macaron_style_in_conformer=True, use_cnn_in_conformer=True, encoder_kernel_size=7, decoder_kernel_size=31, duration_predictor_layers=2, duration_predictor_channels=256, duration_predictor_kernel_size=3, energy_predictor_layers=2, energy_predictor_channels=256, energy_predictor_kernel_size=3, energy_predictor_dropout=0.5, energy_embed_kernel_size=1, energy_embed_dropout=0.0, stop_gradient_from_energy_predictor=False, pitch_predictor_layers=5, pitch_predictor_channels=256, pitch_predictor_kernel_size=5, pitch_predictor_dropout=0.5, pitch_embed_kernel_size=1, pitch_embed_dropout=0.0, stop_gradient_from_pitch_predictor=True, encoder_dropout_rate=0.2, encoder_positional_dropout_rate=0.2, encoder_attention_dropout_rate=0.2, decoder_dropout_rate=0.2, decoder_positional_dropout_rate=0.2, decoder_attention_dropout_rate=0.2, duration_predictor_dropout_rate=0.2, speech_decoder_postnet_dropout=0.5, max_source_positions=5000, use_masking=True, use_weighted_masking=False, num_speakers=None, num_languages=None, speaker_embed_dim=None, is_encoder_decoder=True, **kwargs, ): if positionwise_conv_kernel_size % 2 == 0: raise ValueError( f"positionwise_conv_kernel_size must be odd, but got {positionwise_conv_kernel_size} instead." 
) if encoder_kernel_size % 2 == 0: raise ValueError(f"encoder_kernel_size must be odd, but got {encoder_kernel_size} instead.") if decoder_kernel_size % 2 == 0: raise ValueError(f"decoder_kernel_size must be odd, but got {decoder_kernel_size} instead.") if duration_predictor_kernel_size % 2 == 0: raise ValueError( f"duration_predictor_kernel_size must be odd, but got {duration_predictor_kernel_size} instead." ) if energy_predictor_kernel_size % 2 == 0: raise ValueError( f"energy_predictor_kernel_size must be odd, but got {energy_predictor_kernel_size} instead." ) if energy_embed_kernel_size % 2 == 0: raise ValueError(f"energy_embed_kernel_size must be odd, but got {energy_embed_kernel_size} instead.") if pitch_predictor_kernel_size % 2 == 0: raise ValueError( f"pitch_predictor_kernel_size must be odd, but got {pitch_predictor_kernel_size} instead." ) if pitch_embed_kernel_size % 2 == 0: raise ValueError(f"pitch_embed_kernel_size must be odd, but got {pitch_embed_kernel_size} instead.") if hidden_size % encoder_num_attention_heads != 0: raise ValueError("The hidden_size must be evenly divisible by encoder_num_attention_heads.") if hidden_size % decoder_num_attention_heads != 0: raise ValueError("The hidden_size must be evenly divisible by decoder_num_attention_heads.") if use_masking and use_weighted_masking: raise ValueError("Either use_masking or use_weighted_masking can be True, but not both.") self.hidden_size = hidden_size self.vocab_size = vocab_size self.num_mel_bins = num_mel_bins self.encoder_config = { "num_attention_heads": encoder_num_attention_heads, "layers": encoder_layers, "kernel_size": encoder_kernel_size, "attention_dropout_rate": encoder_attention_dropout_rate, "dropout_rate": encoder_dropout_rate, "positional_dropout_rate": encoder_positional_dropout_rate, "linear_units": encoder_linear_units, "normalize_before": encoder_normalize_before, "concat_after": encoder_concat_after, } self.decoder_config = { "num_attention_heads": decoder_num_attention_heads, "layers": decoder_layers, "kernel_size": decoder_kernel_size, "attention_dropout_rate": decoder_attention_dropout_rate, "dropout_rate": decoder_dropout_rate, "positional_dropout_rate": decoder_positional_dropout_rate, "linear_units": decoder_linear_units, "normalize_before": decoder_normalize_before, "concat_after": decoder_concat_after, } self.encoder_num_attention_heads = encoder_num_attention_heads self.encoder_layers = encoder_layers self.duration_predictor_channels = duration_predictor_channels self.duration_predictor_kernel_size = duration_predictor_kernel_size self.duration_predictor_layers = duration_predictor_layers self.energy_embed_dropout = energy_embed_dropout self.energy_embed_kernel_size = energy_embed_kernel_size self.energy_predictor_channels = energy_predictor_channels self.energy_predictor_dropout = energy_predictor_dropout self.energy_predictor_kernel_size = energy_predictor_kernel_size self.energy_predictor_layers = energy_predictor_layers self.pitch_embed_dropout = pitch_embed_dropout self.pitch_embed_kernel_size = pitch_embed_kernel_size self.pitch_predictor_channels = pitch_predictor_channels self.pitch_predictor_dropout = pitch_predictor_dropout self.pitch_predictor_kernel_size = pitch_predictor_kernel_size self.pitch_predictor_layers = pitch_predictor_layers self.positionwise_conv_kernel_size = positionwise_conv_kernel_size self.speech_decoder_postnet_units = speech_decoder_postnet_units self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout self.speech_decoder_postnet_kernel = 
speech_decoder_postnet_kernel self.speech_decoder_postnet_layers = speech_decoder_postnet_layers self.reduction_factor = reduction_factor self.speaking_speed = speaking_speed self.stop_gradient_from_energy_predictor = stop_gradient_from_energy_predictor self.stop_gradient_from_pitch_predictor = stop_gradient_from_pitch_predictor self.max_source_positions = max_source_positions self.use_cnn_in_conformer = use_cnn_in_conformer self.use_macaron_style_in_conformer = use_macaron_style_in_conformer self.use_masking = use_masking self.use_weighted_masking = use_weighted_masking self.num_speakers = num_speakers self.num_languages = num_languages self.speaker_embed_dim = speaker_embed_dim self.duration_predictor_dropout_rate = duration_predictor_dropout_rate self.is_encoder_decoder = is_encoder_decoder super().__init__( is_encoder_decoder=is_encoder_decoder, **kwargs, ) class FastSpeech2ConformerHifiGanConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FastSpeech2ConformerHifiGanModel`]. It is used to instantiate a FastSpeech2Conformer HiFi-GAN vocoder model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FastSpeech2Conformer [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: model_in_dim (`int`, *optional*, defaults to 80): The number of frequency bins in the input log-mel spectrogram. upsample_initial_channel (`int`, *optional*, defaults to 512): The number of input channels into the upsampling network. upsample_rates (`tuple[int]` or `list[int]`, *optional*, defaults to `[8, 8, 2, 2]`): A tuple of integers defining the stride of each 1D convolutional layer in the upsampling network. The length of *upsample_rates* defines the number of convolutional layers and has to match the length of *upsample_kernel_sizes*. upsample_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[16, 16, 4, 4]`): A tuple of integers defining the kernel size of each 1D convolutional layer in the upsampling network. The length of *upsample_kernel_sizes* defines the number of convolutional layers and has to match the length of *upsample_rates*. resblock_kernel_sizes (`tuple[int]` or `list[int]`, *optional*, defaults to `[3, 7, 11]`): A tuple of integers defining the kernel sizes of the 1D convolutional layers in the multi-receptive field fusion (MRF) module. resblock_dilation_sizes (`tuple[tuple[int]]` or `list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`): A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the multi-receptive field fusion (MRF) module. initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. leaky_relu_slope (`float`, *optional*, defaults to 0.1): The angle of the negative slope used by the leaky ReLU activation. normalize_before (`bool`, *optional*, defaults to `True`): Whether or not to normalize the spectrogram before vocoding using the vocoder's learned mean and variance. 
    Example:

    ```python
    >>> from transformers import FastSpeech2ConformerHifiGan, FastSpeech2ConformerHifiGanConfig

    >>> # Initializing a FastSpeech2ConformerHifiGan configuration
    >>> configuration = FastSpeech2ConformerHifiGanConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = FastSpeech2ConformerHifiGan(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "hifigan"
    base_config_key = "vocoder_config"

    def __init__(
        self,
        model_in_dim=80,
        upsample_initial_channel=512,
        upsample_rates=[8, 8, 2, 2],
        upsample_kernel_sizes=[16, 16, 4, 4],
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        initializer_range=0.01,
        leaky_relu_slope=0.1,
        normalize_before=True,
        **kwargs,
    ):
        self.model_in_dim = model_in_dim
        self.upsample_initial_channel = upsample_initial_channel
        self.upsample_rates = upsample_rates
        self.upsample_kernel_sizes = upsample_kernel_sizes
        self.resblock_kernel_sizes = resblock_kernel_sizes
        self.resblock_dilation_sizes = resblock_dilation_sizes
        self.initializer_range = initializer_range
        self.leaky_relu_slope = leaky_relu_slope
        self.normalize_before = normalize_before
        super().__init__(**kwargs)


class FastSpeech2ConformerWithHifiGanConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`FastSpeech2ConformerWithHifiGan`]. It is used to
    instantiate a [`FastSpeech2ConformerWithHifiGan`] model according to the specified sub-models configurations,
    defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the
    FastSpeech2ConformerModel [espnet/fastspeech2_conformer](https://huggingface.co/espnet/fastspeech2_conformer) and
    FastSpeech2ConformerHifiGan
    [espnet/fastspeech2_conformer_hifigan](https://huggingface.co/espnet/fastspeech2_conformer_hifigan) architectures.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        model_config (`Union[FastSpeech2ConformerConfig, dict]`, *optional*):
            Configuration of the text-to-speech model.
        vocoder_config (`Union[FastSpeech2ConformerHifiGanConfig, dict]`, *optional*):
            Configuration of the vocoder model.

    Example:

    ```python
    >>> from transformers import (
    ...     FastSpeech2ConformerConfig,
    ...     FastSpeech2ConformerHifiGanConfig,
    ...     FastSpeech2ConformerWithHifiGanConfig,
    ...     FastSpeech2ConformerWithHifiGan,
    ... )

    >>> # Initializing FastSpeech2ConformerWithHifiGan sub-modules configurations.
    >>> model_config = FastSpeech2ConformerConfig()
    >>> vocoder_config = FastSpeech2ConformerHifiGanConfig()

    >>> # Initializing a FastSpeech2ConformerWithHifiGan module style configuration
    >>> configuration = FastSpeech2ConformerWithHifiGanConfig(model_config.to_dict(), vocoder_config.to_dict())

    >>> # Initializing a model (with random weights)
    >>> model = FastSpeech2ConformerWithHifiGan(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "fastspeech2_conformer_with_hifigan"
    sub_configs = {"model_config": FastSpeech2ConformerConfig, "vocoder_config": FastSpeech2ConformerHifiGanConfig}

    def __init__(
        self,
        model_config: Optional[dict] = None,
        vocoder_config: Optional[dict] = None,
        **kwargs,
    ):
        if model_config is None:
            model_config = {}
            logger.info("model_config is None. Initializing the model with default values.")

        if vocoder_config is None:
            vocoder_config = {}
            logger.info("vocoder_config is None. Initializing the vocoder with default values.")

        self.model_config = FastSpeech2ConformerConfig(**model_config)
        self.vocoder_config = FastSpeech2ConformerHifiGanConfig(**vocoder_config)
        super().__init__(**kwargs)


__all__ = ["FastSpeech2ConformerConfig", "FastSpeech2ConformerHifiGanConfig", "FastSpeech2ConformerWithHifiGanConfig"]
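Taken together, the classes above compose: the odd-kernel checks in `FastSpeech2ConformerConfig.__init__` fail fast at construction time, and the composite config rebuilds fully typed sub-configs from plain dicts. A short usage sketch (the parameter values are illustrative only, not recommended settings):

```python
from transformers import (
    FastSpeech2ConformerConfig,
    FastSpeech2ConformerHifiGanConfig,
    FastSpeech2ConformerWithHifiGanConfig,
)

# Even kernel sizes are rejected with a ValueError, per the validation above.
try:
    FastSpeech2ConformerConfig(encoder_kernel_size=8)
except ValueError as err:
    print(err)  # encoder_kernel_size must be odd, but got 8 instead.

# The composite config accepts dicts and materializes typed sub-configs from them.
config = FastSpeech2ConformerWithHifiGanConfig(
    model_config=FastSpeech2ConformerConfig(encoder_kernel_size=7).to_dict(),
    vocoder_config=FastSpeech2ConformerHifiGanConfig(model_in_dim=80).to_dict(),
)
print(type(config.model_config).__name__)  # FastSpeech2ConformerConfig
print(config.vocoder_config.model_in_dim)  # 80
```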
transformers/src/transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Flava.""" import math import random from collections.abc import Iterable from functools import lru_cache from typing import Any, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging from ...utils.import_utils import requires if is_vision_available(): import PIL logger = logging.get_logger(__name__) # These values are taken from CLIP FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN FLAVA_IMAGE_STD = OPENAI_CLIP_STD FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0] FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0] LOGIT_LAPLACE_EPS: float = 0.1 # Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py class FlavaMaskingGenerator: def __init__( self, input_size: Union[int, tuple[int, int]] = 14, total_mask_patches: int = 75, mask_group_max_patches: Optional[int] = None, mask_group_min_patches: int = 16, mask_group_min_aspect_ratio: Optional[float] = 0.3, mask_group_max_aspect_ratio: Optional[float] = None, ): if not isinstance(input_size, tuple): input_size = (input_size,) * 2 self.height, self.width = input_size self.num_patches = self.height * self.width self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio)) def __repr__(self): repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % ( self.height, self.width, self.mask_group_min_patches, self.mask_group_max_patches, self.total_mask_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1], ) return repr_str def get_shape(self): return self.height, self.width def _mask(self, mask, max_mask_patches): delta = 0 for _attempt in range(10): target_area = random.uniform(self.mask_group_min_patches, max_mask_patches) aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) height = int(round(math.sqrt(target_area * aspect_ratio))) width = int(round(math.sqrt(target_area / aspect_ratio))) if width < self.width and height < self.height: top = random.randint(0, self.height - height) left = random.randint(0, self.width - width) num_masked = mask[top : top + height, left : left + width].sum() # Overlap if 0 < height * width - num_masked <= max_mask_patches: for i in range(top, top 
+ height):
                        for j in range(left, left + width):
                            if mask[i, j] == 0:
                                mask[i, j] = 1
                                delta += 1

                if delta > 0:
                    break
        return delta

    def __call__(self):
        mask = np.zeros(shape=self.get_shape(), dtype=int)
        mask_count = 0
        while mask_count < self.total_mask_patches:
            max_mask_patches = self.total_mask_patches - mask_count
            max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)

            delta = self._mask(mask, max_mask_patches)
            if delta == 0:
                break
            else:
                mask_count += delta

        return mask


@requires(backends=("vision",))
class FlavaImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Flava image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in `preprocess`.
        size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
            `preprocess`.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
        crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
            `crop_size` parameter in `preprocess`.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in `preprocess`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
            `preprocess`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        return_image_mask (`bool`, *optional*, defaults to `False`):
            Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
        input_size_patches (`int`, *optional*, defaults to 14):
            Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
            by the `input_size_patches` parameter in `preprocess`.
        total_mask_patches (`int`, *optional*, defaults to 75):
            Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
            `preprocess`.
        mask_group_min_patches (`int`, *optional*, defaults to 16):
            Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
            parameter in `preprocess`.
        mask_group_max_patches (`int`, *optional*):
            Maximum number of patches that should be masked.
Can be overridden by the `mask_group_max_patches`
            parameter in `preprocess`.
        mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
            Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
            in `preprocess`.
        mask_group_max_aspect_ratio (`float`, *optional*):
            Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
            in `preprocess`.
        return_codebook_pixels (`bool`, *optional*, defaults to `False`):
            Whether to return the codebook pixel values. Can be overridden by the `return_codebook_pixels` parameter
            in `preprocess`.
        codebook_do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
            `codebook_do_resize` parameter in `preprocess`.
        codebook_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
            Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
            `preprocess`.
        codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
            Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
            parameter in `preprocess`.
        codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to crop the input for codebook at the center. If the input size is smaller than
            `codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
            overridden by the `codebook_do_center_crop` parameter in `preprocess`.
        codebook_crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
            Desired output size for codebook input when applying center-cropping. Can be overridden by the
            `codebook_crop_size` parameter in `preprocess`.
        codebook_do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
            overridden by the `codebook_do_rescale` parameter in `preprocess`.
        codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
            `codebook_rescale_factor` parameter in `preprocess`.
        codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
            Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
            `codebook_do_map_pixels` parameter in `preprocess`.
        codebook_do_normalize (`bool`, *optional*, defaults to `True`):
            Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`.
            Can be overridden by the `codebook_do_normalize` parameter in `preprocess`.
        codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
            The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
            by the `codebook_image_mean` parameter in `preprocess`.
        codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1, 1, 1]`):
            The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
            be overridden by the `codebook_image_std` parameter in `preprocess`.
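    Example (an illustrative sketch; the shapes shown assume the default 224x224 main crop, 112x112 codebook crop,
    and 14x14 patch grid):

    ```python
    >>> import numpy as np
    >>> from transformers import FlavaImageProcessor

    >>> processor = FlavaImageProcessor()
    >>> image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    >>> outputs = processor(image, return_image_mask=True, return_codebook_pixels=True, return_tensors="np")
    >>> outputs["pixel_values"].shape
    (1, 3, 224, 224)
    >>> outputs["codebook_pixel_values"].shape
    (1, 3, 112, 112)
    >>> outputs["bool_masked_pos"].shape
    (1, 14, 14)
    ```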
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, # Mask related params return_image_mask: bool = False, input_size_patches: int = 14, total_mask_patches: int = 75, mask_group_min_patches: int = 16, mask_group_max_patches: Optional[int] = None, mask_group_min_aspect_ratio: float = 0.3, mask_group_max_aspect_ratio: Optional[float] = None, # Codebook related params return_codebook_pixels: bool = False, codebook_do_resize: bool = True, codebook_size: Optional[bool] = None, codebook_resample: int = PILImageResampling.LANCZOS, codebook_do_center_crop: bool = True, codebook_crop_size: Optional[int] = None, codebook_do_rescale: bool = True, codebook_rescale_factor: Union[int, float] = 1 / 255, codebook_do_map_pixels: bool = True, codebook_do_normalize: bool = True, codebook_image_mean: Optional[Union[float, Iterable[float]]] = None, codebook_image_std: Optional[Union[float, Iterable[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 224, "width": 224} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size, param_name="crop_size") codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112} codebook_size = get_size_dict(codebook_size, param_name="codebook_size") codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112} codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD self.return_image_mask = return_image_mask self.input_size_patches = input_size_patches self.total_mask_patches = total_mask_patches self.mask_group_min_patches = mask_group_min_patches self.mask_group_max_patches = mask_group_max_patches self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio self.return_codebook_pixels = return_codebook_pixels self.codebook_do_resize = codebook_do_resize self.codebook_size = codebook_size self.codebook_resample = codebook_resample self.codebook_do_center_crop = codebook_do_center_crop self.codebook_crop_size = codebook_crop_size self.codebook_do_rescale = codebook_do_rescale self.codebook_rescale_factor = codebook_rescale_factor self.codebook_do_map_pixels = codebook_do_map_pixels self.codebook_do_normalize = codebook_do_normalize self.codebook_image_mean = codebook_image_mean self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD @classmethod def from_dict(cls, image_processor_dict: dict[str, Any], **kwargs): """ 
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)` """ image_processor_dict = image_processor_dict.copy() if "codebook_size" in kwargs: image_processor_dict["codebook_size"] = kwargs.pop("codebook_size") if "codebook_crop_size" in kwargs: image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size") return super().from_dict(image_processor_dict, **kwargs) @lru_cache def masking_generator( self, input_size_patches, total_mask_patches, mask_group_min_patches, mask_group_max_patches, mask_group_min_aspect_ratio, mask_group_max_aspect_ratio, ) -> FlavaMaskingGenerator: return FlavaMaskingGenerator( input_size=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio, ) # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def map_pixels(self, image: np.ndarray) -> np.ndarray: return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS def _preprocess_image( self, image: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_map_pixels: Optional[bool] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """Preprocesses a single image.""" validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample, ) # All transformations expect numpy arrays. image = to_numpy_array(image) if do_rescale and is_scaled_image(image): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(image) if do_resize: image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) if do_center_crop: image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) if do_map_pixels: image = self.map_pixels(image) if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image @filter_out_non_signature_kwargs() def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, # Mask related params return_image_mask: Optional[bool] = None, input_size_patches: Optional[int] = None, total_mask_patches: Optional[int] = None, mask_group_min_patches: Optional[int] = None, mask_group_max_patches: Optional[int] = None, mask_group_min_aspect_ratio: Optional[float] = None, mask_group_max_aspect_ratio: Optional[float] = None, # Codebook related params return_codebook_pixels: Optional[bool] = None, codebook_do_resize: Optional[bool] = None, codebook_size: Optional[dict[str, int]] = None, codebook_resample: Optional[int] = None, codebook_do_center_crop: Optional[bool] = None, codebook_crop_size: Optional[dict[str, int]] = None, codebook_do_rescale: Optional[bool] = None, 
codebook_rescale_factor: Optional[float] = None, codebook_do_map_pixels: Optional[bool] = None, codebook_do_normalize: Optional[bool] = None, codebook_image_mean: Optional[Iterable[float]] = None, codebook_image_std: Optional[Iterable[float]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`): Whether to return the image mask. input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`): Size of the patches to extract from the image. total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`): Total number of patches to extract from the image. mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`): Minimum number of patches to extract from the image. mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`): Maximum number of patches to extract from the image. mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`): Minimum aspect ratio of the patches to extract from the image. mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`): Maximum aspect ratio of the patches to extract from the image. return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`): Whether to return the codebook pixels. codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`): Whether to resize the codebook pixels. codebook_size (`dict[str, int]`, *optional*, defaults to `self.codebook_size`): Size of the codebook pixels. codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`): Resampling filter to use if resizing the codebook pixels. This can be one of the enum `PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`. 
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`): Whether to center crop the codebook pixels. codebook_crop_size (`dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`): Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set to `True`. codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`): Whether to rescale the codebook pixels values between [0 - 1]. codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`): Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`. codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`): Whether to map the codebook pixels values. codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`): Whether to normalize the codebook pixels. codebook_image_mean (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_mean`): Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`. codebook_image_std (`float` or `list[float]`, *optional*, defaults to `self.codebook_image_std`): Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
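        Example (an illustrative sketch; assumes the default processor settings):

        ```python
        >>> import numpy as np
        >>> from transformers import FlavaImageProcessor

        >>> processor = FlavaImageProcessor()
        >>> image = np.zeros((256, 256, 3), dtype=np.uint8)
        >>> batch = processor.preprocess(image, return_image_mask=True, return_tensors="np")
        >>> sorted(batch.keys())
        ['bool_masked_pos', 'pixel_values']
        ```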
""" do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches mask_group_min_patches = ( mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches ) mask_group_max_patches = ( mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches ) mask_group_min_aspect_ratio = ( mask_group_min_aspect_ratio if mask_group_min_aspect_ratio is not None else self.mask_group_min_aspect_ratio ) mask_group_max_aspect_ratio = ( mask_group_max_aspect_ratio if mask_group_max_aspect_ratio is not None else self.mask_group_max_aspect_ratio ) return_codebook_pixels = ( return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels ) codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize codebook_size = codebook_size if codebook_size is not None else self.codebook_size codebook_size = get_size_dict(codebook_size, param_name="codebook_size") codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale codebook_rescale_factor = ( codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor ) codebook_do_center_crop = ( codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop ) codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size") codebook_do_map_pixels = ( codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels ) codebook_do_normalize = ( codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize ) codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) processed_images = [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_map_pixels=False, data_format=data_format, input_data_format=input_data_format, ) for img in images ] data = {"pixel_values": processed_images} if return_codebook_pixels: codebook_images = [ self._preprocess_image( image=img, do_resize=codebook_do_resize, size=codebook_size, resample=codebook_resample, do_center_crop=codebook_do_center_crop, crop_size=codebook_crop_size, do_rescale=codebook_do_rescale, rescale_factor=codebook_rescale_factor, do_normalize=codebook_do_normalize, image_mean=codebook_image_mean, image_std=codebook_image_std, do_map_pixels=codebook_do_map_pixels, data_format=data_format, input_data_format=input_data_format, ) for img in images ] data["codebook_pixel_values"] = codebook_images if return_image_mask: mask_generator = self.masking_generator( input_size_patches=input_size_patches, total_mask_patches=total_mask_patches, mask_group_min_patches=mask_group_min_patches, mask_group_max_patches=mask_group_max_patches, mask_group_min_aspect_ratio=mask_group_min_aspect_ratio, mask_group_max_aspect_ratio=mask_group_max_aspect_ratio, ) masks = [mask_generator() for _ in images] data["bool_masked_pos"] = masks return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["FlavaImageProcessor"]
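For reference, the block-wise masking policy implemented by `FlavaMaskingGenerator` above can also be exercised directly. A minimal sketch (the module import path and the seed are assumptions for illustration; the class is module-internal, not part of the public API):

```python
import random

import numpy as np

# Import path assumes this file's location inside the transformers package (hypothetical usage).
from transformers.models.flava.image_processing_flava import FlavaMaskingGenerator

random.seed(0)  # the generator draws from Python's `random` module
generator = FlavaMaskingGenerator(input_size=14, total_mask_patches=75)
mask = generator()

print(mask.shape)       # (14, 14)
print(int(mask.sum()))  # at most 75; may be fewer if placement attempts run out
```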
transformers/src/transformers/models/flava/image_processing_flava.py
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from collections.abc import Callable, Sequence
from typing import Any, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, SlidingWindowLayer
from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPast
from ...modeling_rope_utils import rope_config_validation
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.deprecation import deprecate_kwarg
from ..auto import AutoModel
from ..gemma2.configuration_gemma2 import Gemma2Config
from ..gemma2.modeling_gemma2 import (
    Gemma2MLP,
    Gemma2PreTrainedModel,
    Gemma2RotaryEmbedding,
    eager_attention_forward,
    rotate_half,
)
from ..gemma3.modeling_gemma3 import (
    Gemma3Attention,
    Gemma3DecoderLayer,
    Gemma3ForCausalLM,
    Gemma3RMSNorm,
    Gemma3TextModel,
    Gemma3TextScaledWordEmbedding,
)
from ..paligemma.modeling_paligemma import (
    PaliGemmaCausalLMOutputWithPast,
    PaliGemmaForConditionalGeneration,
    PaliGemmaModel,
    PaligemmaModelOutputWithPast,
)
from ..timm_wrapper.configuration_timm_wrapper import TimmWrapperConfig


logger = logging.get_logger(__name__)


class Gemma3nTextConfig(Gemma2Config, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nTextModel`]. It is used to instantiate a
    Gemma3nTextModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`Gemma3nTextConfig`] and can be used to control the model outputs. Read the
    documentation from [`Gemma3nTextConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262400):
            Vocabulary size of the Gemma3nText model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed when calling [`Gemma3nTextModel`]
        vocab_size_per_layer_input (`int`, *optional*, defaults to 262144):
            Vocabulary size of the per-layer text embeddings that augment the standard embeddings.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        hidden_size_per_layer_input (`int`, *optional*, defaults to 256):
            Dimension of the hidden representations for per-layer embeddings.
        intermediate_size (`int` or `Sequence[int]`, *optional*, defaults to 16384):
            Dimension of the MLP representations.
            MatFormer configurations may wish to provide a sequence of integers to account for variable
            intermediate_size values across layers. In such cases, `len(intermediate_size) == num_hidden_layers`.
        num_hidden_layers (`int`, *optional*, defaults to 35):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out this
            [paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified, will default to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings used in global attention. NOTE: if
            you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend
            you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to
            use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being
            the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except
            'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will
            enable the model to handle sequences of length x * original maximum pre-trained length.
            `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The
            original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used
            with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified,
            it defaults to value recommended by the implementation, using the `factor` field to infer the suggested
            value.
            `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation
            (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*):
            Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function.
            If unspecified, it defaults to 1. `short_factor` (`List[float]`, *optional*): Only used with 'longrope'.
            The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list
            of numbers with the same length as the hidden size divided by the number of attention heads divided by 2
            `long_factor` (`List[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to
            long contexts (> `original_max_position_embeddings`). Must be a list of numbers with the same length as
            the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`,
            *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high
            frequency components of the RoPE
        rope_local_base_freq (float, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings for local attention.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 512):
            This is the size of the sliding window used by local attention layers.
        layer_types (`Sequence[str]`, *optional*):
            A sequence of strings defining the attention type for that layer as either "sliding_attention" or
            "full_attention". If not provided, `layer_types` will be inferred from `num_hidden_layers` using a pattern
            of four "sliding_attention" layers followed by one "full_attention". The last layer in the model should
            always be a "full_attention" layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        altup_active_idx (`int`, *optional*, defaults to 0):
            The index of the prediction from which AltUp will compute additional predictions or corrections.
        altup_coef_clip (`float`, *optional*, defaults to 120.0):
            The maximum amplitude of an AltUp prediction or correction coefficient weight.
        altup_correct_scale (`bool`, *optional*, defaults to `True`):
            If True, apply the `AltUp.correct_output_scale` to the corrected prediction at `altup_active_idx`.
        altup_num_inputs (`int`, *optional*, defaults to 4):
            The number of predictions that AltUp should make given the input sequence.
        num_kv_shared_layers (`int`, *optional*, defaults to 15):
            The number of layers that share KV cache values. During the forward pass, the last `num_kv_shared_layers`
            layers in the model "share" the KV values in that each local and global layer in this range uses the KV
            cache values computed for the last local or global layer, respectively, before entering this range. The
            value of `num_kv_shared_layers` should be a multiple of the sliding-window attention pattern length.
        laurel_rank (int, *optional*, defaults to 64):
            The intermediate size for the linear projections in the Learned Augmented Residual Layer.
        activation_sparsity_pattern (Sequence[float], *optional*, defaults to `(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)`):
            The sparsity factor used to extract the top-k activations for a given layer. The provided Sequence must
            explicitly provide a sparsity value for each layer in the model.

    Example:

    ```python
    >>> from transformers import Gemma3nTextModel, Gemma3nTextConfig

    >>> # Initializing a Gemma3nText gemma3n_text-E4B style configuration
    >>> configuration = Gemma3nTextConfig()

    >>> # Initializing a model from the gemma3n_text-E4B style configuration
    >>> model = Gemma3nTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "gemma3n_text"

    def __init__(
        self,
        vocab_size: int = 262_400,
        vocab_size_per_layer_input: int = 262_144,
        hidden_size: int = 2048,
        hidden_size_per_layer_input: int = 256,
        intermediate_size: Union[int, Sequence[int]] = 16_384,
        num_hidden_layers: int = 35,
        num_attention_heads: int = 8,
        num_key_value_heads: int = 2,
        head_dim: int = 256,
        hidden_activation: str = "gelu_pytorch_tanh",
        max_position_embeddings: int = 32_768,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = 0,
        eos_token_id: int = 1,
        bos_token_id: int = 2,
        rope_theta: float = 1_000_000.0,
        rope_scaling: Optional[dict[str, Any]] = None,
        rope_local_base_freq: float = 10_000.0,
        attention_bias: bool = False,
        attention_dropout: float = 0.0,
        sliding_window: int = 512,
        layer_types: Optional[Sequence[str]] = None,
        final_logit_softcapping: float = 30.0,
        altup_active_idx: int = 0,
        altup_coef_clip: float = 120.0,
        altup_correct_scale: bool = True,
        altup_num_inputs: int = 4,
        num_kv_shared_layers: int = 15,
        laurel_rank: int = 64,
        activation_sparsity_pattern: Optional[Union[float, Sequence[float]]] = (0.95,) * 10 + (0.0,) * 25,
        **kwargs,
    ):
        PretrainedConfig.__init__(
            self,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        if isinstance(intermediate_size, Sequence) and (intsize_len := len(intermediate_size)) != num_hidden_layers:
            raise ValueError(
                "intermediate_size must have an explicit intermediate size for every layer or one for all layers. "
                f"Expected {num_hidden_layers} values but got {intsize_len}."
            )
        elif not isinstance(intermediate_size, Sequence):
            intermediate_size = [intermediate_size] * num_hidden_layers

        self.vocab_size = vocab_size
        self.vocab_size_per_layer_input = vocab_size_per_layer_input
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.layer_types = layer_types

        self.rope_local_base_freq = rope_local_base_freq
        self.rope_scaling = rope_scaling
        rope_config_validation(self)

        if layer_types is None:
            self.layer_types = [
                "full_attention" if (i + 1) % 5 == 0 else "sliding_attention" for i in range(self.num_hidden_layers)
            ]
        else:
            self.layer_types = layer_types
        layer_type_validation(self.layer_types)

        self.hidden_size_per_layer_input = hidden_size_per_layer_input
        self.num_kv_shared_layers = num_kv_shared_layers

        self.altup_active_idx = altup_active_idx
        self.altup_coef_clip = altup_coef_clip
        self.altup_correct_scale = altup_correct_scale
        self.altup_num_inputs = altup_num_inputs

        self.laurel_rank = laurel_rank

        if activation_sparsity_pattern is None:
            activation_sparsity_pattern = [0.0] * num_hidden_layers

        if (len_asp := len(activation_sparsity_pattern)) != num_hidden_layers:
            raise ValueError(
                "activation_sparsity_pattern must have an explicit activation sparsity value for every layer. "
                f"Expected {num_hidden_layers} values but got {len_asp}."
            )
        self.activation_sparsity_pattern = activation_sparsity_pattern


class Gemma3nAudioConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nAudioEncoder`]. It is used to instantiate
    a `Gemma3nAudioEncoder` model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.,
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`Gemma3nAudioConfig`] and can be used to control the model outputs. Read the
    documentation from [`Gemma3nAudioConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128):
            Vocabulary size of the additional hard-token embeddings for audio model. These augment the embeddings
            included in the `Gemma3nTextModel` to provide, e.g., the end of audio and audio soft token placeholder
            tokens when converting `input_ids` to embeddings in the `Gemma3nForConditionalGeneration` model.
        vocab_offset (`int`, *optional*, defaults to 262272):
            Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the
            0-indexed `Gemma3nMultimodalEmbedder.embedding` table.
        input_feat_size (`int`, *optional*, defaults to 128):
            The number of channels in each mel-spectrogram frame.
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimension of the hidden representations.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        gradient_clipping (`float`, *optional*, defaults to 10000000000.0):
            Clipping value used to stabilize extremely large gradient values.
        conf_attention_chunk_size (`int`, *optional*, defaults to 12):
            The sub-sequence size for local attention processing inside the Conformer ("conf") section of the
            Universal Speech Model.
        conf_attention_context_left (`int`, *optional*, defaults to 13):
            The left context size of the local attention inside the Conformer ("conf") section of the Universal Speech
            Model.
        conf_attention_context_right (`int`, *optional*, defaults to 0):
            The right context size of the local attention inside the Conformer ("conf") section of the Universal
            Speech Model.
        conf_attention_logit_cap (`float`, *optional*, defaults to 50.0):
            Logit cap applied during local attention inside the Conformer ("conf") section of the Universal Speech
            Model.
        conf_num_attention_heads (`int`, *optional*, defaults to 8):
            The number of attention heads in local attention inside the Conformer ("conf") section of the Universal
            Speech Model.
        conf_num_hidden_layers (`int`, *optional*, defaults to 12):
            The number of layers that use local attention inside the Conformer ("conf") section of the Universal
            Speech Model.
        conf_conv_kernel_size (`int`, *optional*, defaults to 5):
            Convolution kernel size for the conformer block inside the Conformer ("conf") section of the Universal
            Speech Model.
        conf_reduction_factor (`int`, *optional*, defaults to 4):
            Reduction factor used in the conformer block inside the Conformer ("conf") section of the Universal Speech
            Model.
        conf_residual_weight (`float`, *optional*, defaults to 0.5):
            Residual connection weight inside the Conformer ("conf") section of the Universal Speech Model.
        sscp_conv_channel_size (`tuple(int, int)`, *optional*, defaults to `(128, 32)`):
            The channel sizes for the first and second convolutional layers in the Sub-sample Convolution Projection
            ("sscp") section of the Universal Speech Model.
        sscp_conv_group_norm_eps (`float`, *optional*, defaults to 0.001):
            Epsilon used in group normalization in the subsample convolution projection in the Sub-sample Convolution
            Projection ("sscp") section of the Universal Speech Model.
        sscp_conv_kernel_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((3, 3), (3, 3))`):
            Kernel sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The kernel sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension.
        sscp_conv_stride_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((2, 2), (2, 2))`):
            Stride sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample
            Convolution Projection ("sscp") section of the Universal Speech Model. The stride sizes are specified as a
            tuple of height and width for each layer, where the height corresponds to the time dimension and the width
            corresponds to the frequency dimension.
    Example:

    ```python
    >>> from transformers import Gemma3nAudioConfig, Gemma3nAudioEncoder

    >>> # Initializing a Gemma3nAudioEncoder gemma3n_audio-E4B-style configuration
    >>> configuration = Gemma3nAudioConfig()

    >>> # Initializing a model from the gemma3n_audio-E4B style configuration
    >>> model = Gemma3nAudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "gemma3n_audio"

    def __init__(
        self,
        vocab_size: int = 128,
        vocab_offset: int = 262_144 + 128,  # text vocab size + vision vocab size
        input_feat_size: int = 128,
        hidden_size: int = 1536,
        rms_norm_eps: float = 1e-6,
        gradient_clipping: float = 10_000_000_000.0,
        conf_attention_chunk_size: int = 12,
        conf_attention_context_left: int = 13,
        conf_attention_context_right: int = 0,
        conf_attention_logit_cap: float = 50.0,
        conf_num_attention_heads: int = 8,
        conf_num_hidden_layers: int = 12,
        conf_conv_kernel_size: int = 5,
        conf_reduction_factor: int = 4,
        conf_residual_weight: float = 0.5,
        sscp_conv_channel_size: tuple[int, int] = (128, 32),
        sscp_conv_group_norm_eps: float = 1e-3,
        sscp_conv_kernel_size: tuple[tuple[int, int], tuple[int, int]] = (
            (3, 3),
            (3, 3),
        ),
        sscp_conv_stride_size: tuple[tuple[int, int], tuple[int, int]] = (
            (2, 2),
            (2, 2),
        ),
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.input_feat_size = input_feat_size
        self.hidden_size = hidden_size
        self.rms_norm_eps = rms_norm_eps
        self.vocab_size = vocab_size
        self.vocab_offset = vocab_offset
        self.gradient_clipping = gradient_clipping
        self.conf_attention_chunk_size = conf_attention_chunk_size
        self.conf_attention_context_left = conf_attention_context_left
        self.conf_attention_context_right = conf_attention_context_right
        self.conf_attention_logit_cap = conf_attention_logit_cap
        self.conf_num_attention_heads = conf_num_attention_heads
        self.conf_num_hidden_layers = conf_num_hidden_layers
        self.conf_conv_kernel_size = conf_conv_kernel_size
        self.conf_reduction_factor = conf_reduction_factor
        self.conf_residual_weight = conf_residual_weight
        self.sscp_conv_channel_size = sscp_conv_channel_size
        self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps
        self.sscp_conv_kernel_size = sscp_conv_kernel_size
        self.sscp_conv_stride_size = sscp_conv_stride_size


class Gemma3nVisionConfig(TimmWrapperConfig):
    r"""
    This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to
    instantiate a timm model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B vision tower,
    e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`TimmWrapperConfig`] and can be used to control the model outputs. Read the
    documentation from [`TimmWrapperConfig`] for more information.

    The config loads ImageNet label descriptions and stores them in the `id2label` attribute; the `label2id`
    attribute for default ImageNet models is set to `None` due to occlusions in the label descriptions.

    Args:
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        do_pooling (`bool`, *optional*, defaults to `False`):
            Whether to do pooling for the last_hidden_state in `TimmWrapper` or not.
        architecture (`str`, *optional*, defaults to `"mobilenetv5_300m_enc"`):
            Determines vision architecture for TimmWrapper.
hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. vocab_size (`int`, *optional*, defaults to 128): Vocabulary size of the additional hard-token embeddings for vision model. vocab_offset (`int`, *optional*, defaults to 262144): Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the 0-indexed `Gemma3nMultimodalEmbedder.embedding` table. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. Example: ```python >>> from transformers import Gemma3nVisionConfig, TimmWrapper >>> # Initializing a TimmWrapper gemma3n_vision-E4B-style configuration >>> configuration = Gemma3nVisionConfig() >>> # Initializing a gemma3n_vision-E4B-style TimmWrapper from the configuration >>> model = TimmWrapper(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "gemma3n_vision" def __init__( self, initializer_range: float = 0.02, do_pooling: bool = False, architecture: str = "mobilenetv5_300m_enc", hidden_size: int = 2048, vocab_size: int = 128, vocab_offset: int = 262_144, rms_norm_eps: float = 1e-06, model_args: Optional[dict] = None, **kwargs, ): super().__init__(**kwargs) self.architecture = architecture self.initializer_range = initializer_range self.do_pooling = do_pooling self.hidden_size = hidden_size self.vocab_size = vocab_size self.vocab_offset = vocab_offset self.rms_norm_eps = rms_norm_eps class Gemma3nConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Gemma3nForConditionalGeneration`]. It is used to instantiate a Gemma3nForConditionalGeneration according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Gemma3n-E4B. e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`Union[Gemma3nTextConfig, dict]`, *optional*): The config object of the text backbone. vision_config (`Union[AutoConfig, dict]`, *optional*): Custom vision config or dict. audio_config (`Union[AutoConfig, dict]`, *optional*): Custom audio config or dict. audio_soft_tokens_per_image (`int`, *optional*, defaults to 188): The number of soft tokens per audio clip. vision_soft_tokens_per_image (`int`, *optional*, defaults to 256): The number of soft tokens per image. boi_token_id (`int`, *optional*, defaults to 255999): The begin-of-image token index to wrap the image prompt. eoi_token_id (`int`, *optional*, defaults to 262144): The end-of-image token index to wrap the image prompt. image_token_id (`int`, *optional*, defaults to 262145): The image token index to encode the image prompt. boa_token_id (`int`, *optional*, defaults to 256000): The begin-of-audio token index to wrap the audio prompt. eoa_token_id (`int`, *optional*, defaults to 262272): The end-of-audio token index to wrap the audio prompt. audio_token_id (`int`, *optional*, defaults to 262273): The audio token index to encode the audio prompt. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
    Example:

    ```python
    >>> from transformers import Gemma3nForConditionalGeneration, Gemma3nConfig, Gemma3nTextConfig

    >>> # Initializing a MobileNet vision config, which is loaded from TIMM
    >>> vision_config = Gemma3nVisionConfig()

    >>> # Initializing a Gemma3n Audio config
    >>> audio_config = Gemma3nAudioConfig()

    >>> # Initializing a Gemma3n Text config
    >>> text_config = Gemma3nTextConfig()

    >>> # Initializing a Gemma3n gemma-3n-E4B style configuration
    >>> configuration = Gemma3nConfig(text_config, vision_config, audio_config)

    >>> # Initializing a model from the gemma-3n-E4B style configuration
    >>> model = Gemma3nForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3n"
    sub_configs = {
        "text_config": Gemma3nTextConfig,
        "vision_config": Gemma3nVisionConfig,
        "audio_config": Gemma3nAudioConfig,
    }

    def __init__(
        self,
        text_config: Optional[Union[Gemma3nTextConfig, dict[str, Any]]] = None,
        vision_config: Optional[Union[Gemma3nVisionConfig, dict[str, Any]]] = None,
        audio_config: Optional[Union[Gemma3nAudioConfig, dict[str, Any]]] = None,
        audio_soft_tokens_per_image: int = 188,
        vision_soft_tokens_per_image: int = 256,
        boi_token_id: int = 255_999,
        eoi_token_id: int = 262_144,
        image_token_id: int = 262_145,
        boa_token_id: int = 256_000,
        eoa_token_id: int = 262_272,
        audio_token_id: int = 262_273,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if isinstance(text_config, dict):
            text_config = Gemma3nTextConfig(**text_config)
        elif text_config is None:
            text_config = Gemma3nTextConfig()
            logger.info("text_config is None. Using default Gemma3nTextConfig.")

        if isinstance(vision_config, dict):
            vision_config = Gemma3nVisionConfig(**vision_config)
        elif vision_config is None:
            vision_config = Gemma3nVisionConfig()
            logger.info("vision_config is None. Using default Gemma3nVisionConfig.")

        if isinstance(audio_config, dict):
            audio_config = Gemma3nAudioConfig(**audio_config)
        elif audio_config is None:
            audio_config = Gemma3nAudioConfig()
            logger.info("audio_config is None. Using default Gemma3nAudioConfig.")

        self.text_config = text_config
        self.vision_config = vision_config
        self.audio_config = audio_config

        self.audio_soft_tokens_per_image = audio_soft_tokens_per_image
        self.vision_soft_tokens_per_image = vision_soft_tokens_per_image
        self.boi_token_id = boi_token_id
        self.eoi_token_id = eoi_token_id
        self.image_token_id = image_token_id
        self.boa_token_id = boa_token_id
        self.eoa_token_id = eoa_token_id
        self.audio_token_id = audio_token_id
        self.initializer_range = initializer_range


class Gemma3nModelOutputWithPast(PaligemmaModelOutputWithPast):
    r"""
    past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    audio_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
audio_hidden_states of the model produced by the audio encoder and after projecting the last hidden state. """ audio_hidden_states: Optional[torch.FloatTensor] = None class Gemma3nCausalLMOutputWithPast(PaliGemmaCausalLMOutputWithPast): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. audio_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. audio_hidden_states of the model produced by the audio encoder and after projecting the last hidden state. """ audio_hidden_states: Optional[torch.FloatTensor] = None class Gemma3nRMSNorm(Gemma3RMSNorm): def __init__(self, dim: int, eps: float = 1e-6, with_scale: bool = True): super().__init__(dim, eps=eps) del self.weight self.with_scale = with_scale if self.with_scale: self.weight = nn.Parameter(torch.ones(dim)) else: self.register_buffer("weight", torch.tensor(1.0), persistent=False) def _norm(self, x): return x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x: torch.Tensor) -> torch.Tensor: # Llama does x.to(float16) * w whilst Gemma2 is (x * w).to(float16) # See https://github.com/huggingface/transformers/pull/29402 output = self._norm(x.float()) * self.weight.float() return output.type_as(x) # ==== Audio Encoder ==== class Gemma3nAudioRelativePositionEmbedding(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.num_heads = self.config.conf_num_attention_heads self.channels = self.config.hidden_size self.head_dim = self.channels // self.num_heads self.max_backward = max(0, self.config.conf_attention_context_left - 1) self.max_forward = self.config.conf_attention_context_right self.pos_proj = nn.Linear(self.channels, self.num_heads * self.head_dim, bias=False) min_timescale = 1.0 max_timescale = 1.0e4 num_timescales = self.channels // 2 log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(num_timescales - 1, 1) inv_timescales = min_timescale * torch.exp(torch.arange(num_timescales) * -log_timescale_increment) self.register_buffer( "inv_timescales", inv_timescales.float().unsqueeze(0).unsqueeze(0), persistent=False, ) def _get_timing_signal_1d_pos(self, position: torch.Tensor, dtype: torch.dtype) -> torch.Tensor: position = position.float().unsqueeze(-1) scaled_time = position * self.inv_timescales.to(device=position.device, dtype=torch.float32) timing_signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=-1) return 
timing_signal.type(dtype) def _relative_shift( self, term_bd_before_shift: torch.Tensor, batch_size: int, num_heads: int, num_query_blocks: int, query_block_size: int, key_context_size: int, max_span_plus_1: int, ) -> torch.Tensor: """Performs the relative shift. Args: term_bd_before_shift: Tensor of shape [B, N, U, W, F_span]. batch_size (B), num_heads (N), num_query_blocks (U), query_block_size (W), key_context_size (C = W+L+R), max_span_plus_1 (F_span = L+R+1). Returns: Tensor of shape [B, N, U, W, C]. """ # term_bd_before_shift shape: [B, N, U, W, F_span] # Target shape after shift: [B, N, U, W, C] # Padding amount for the last dimension (F_span) to become (C + 1) # C = key_context_size # F_span = max_span_plus_1 pad_amount_last_dim = (key_context_size + 1) - max_span_plus_1 # PyTorch F.pad expects (pad_left, pad_right, pad_top, pad_bottom ...) # We only pad the last dimension on the right. padding_tuple = (0, pad_amount_last_dim) term_bd_padded = nn.functional.pad(term_bd_before_shift, padding_tuple) # Shape after pad: [B, N, U, W, C+1] # Reshape for slicing (emulating JAX's behavior) # [B, N, U, W * (C+1)] term_bd_reshaped = term_bd_padded.reshape( ( batch_size, num_heads, num_query_blocks, query_block_size * (key_context_size + 1), ) ) # Slice to effective [B, N, U, W * C] term_bd_sliced = term_bd_reshaped[:, :, :, : query_block_size * key_context_size] # Reshape back to [B, N, U, W, C] term_bd_shifted = term_bd_sliced.reshape( ( batch_size, num_heads, num_query_blocks, query_block_size, key_context_size, ) ) return term_bd_shifted def forward(self, queries: torch.Tensor, keys: torch.Tensor) -> torch.Tensor: # queries: [B, U, W, N, H] (batch, num_query_blocks, query_block_size, num_heads, head_dim) # keys: [B, U, C, N, H] (batch, num_query_blocks, key_context_size, num_heads, head_dim) # C = W + L + R (key_context_size) # F_span = L + R + 1 (max_span + 1) batch_size, num_query_blocks, query_block_size, num_heads, head_dim = queries.shape _, _, key_context_size, _, _ = keys.shape # Relative positions for sinusoidal embeddings: [L, L-1, ..., -R] # Length is L+R+1 = self.max_span + 1 pos_indices = torch.arange(self.max_backward, -self.max_forward - 1, -1, device=queries.device).unsqueeze( 0 ) # Shape [1, F_span] max_span_plus_1 = pos_indices.shape[1] # F_span sin_emb_timing_signal = self._get_timing_signal_1d_pos( pos_indices, dtype=queries.dtype ) # Shape [1, F_span, self.channels] # Project sinusoidal embeddings: [1, F_span, self.channels] -> [1, F_span, N*H] projected_sin_emb = self.pos_proj(sin_emb_timing_signal) # Reshape to [1, F_span, N, H] then squeeze to [F_span, N, H] sin_emb = projected_sin_emb.reshape(1, max_span_plus_1, self.num_heads, self.head_dim).squeeze( 0 ) # Shape [F, N, H] # term_ac: Query-Key content interaction # queries: [B, U, W, N, H] -> permute to [B, N, U, W, H] for matmul # keys: [B, U, C, N, H] -> permute to [B, N, U, H, C] for matmul queries_p = queries.permute(0, 3, 1, 2, 4) # [B, N, U, W, H] keys_p_t = keys.permute(0, 3, 1, 4, 2) # [B, N, U, H, C] term_ac = torch.matmul(queries_p, keys_p_t) # [B, N, U, W, C] # term_bd: Query-Position interaction # Original einsum: term_bd_unshifed = torch.einsum('buwnh,fnh->bnuwf', queries, sin_emb) # queries shape: [B, U, W, N, H] # sin_emb shape: [F, N, H] # Target output shape: [B, N, U, W, F] # Permute queries to [B, N, U, W, H] for easier broadcasting with sin_emb q_permuted = queries.permute(0, 3, 1, 2, 4) # Permute sin_emb to [N, H, F] to prepare for matmul # sin_emb original is [F, N, H] s_permuted = 
sin_emb.permute(1, 2, 0) # Shape: [N, H, F] # Reshape queries for matmul: [B, N, U*W, H] q_reshaped = q_permuted.reshape(batch_size, num_heads, num_query_blocks * query_block_size, head_dim) # Perform matmul: [B, N, U*W, H] @ [N, H, F] # s_permuted ([N, H, F]) will be broadcast to [B, N, H, F] # Result: [B, N, U*W, F] term_bd_unshifed_matmul = torch.matmul(q_reshaped, s_permuted) # Reshape to target [B, N, U, W, F] term_bd_unshifed = term_bd_unshifed_matmul.reshape( batch_size, num_heads, num_query_blocks, query_block_size, max_span_plus_1, ) # Apply relative shift to term_bd_unshifed term_bd_shifted = self._relative_shift( term_bd_unshifed, batch_size, num_heads, num_query_blocks, query_block_size, key_context_size, max_span_plus_1, ) # Shape [B, N, U, W, C] return term_ac + term_bd_shifted class Gemma3nAudioAttention(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.num_heads = self.config.conf_num_attention_heads self.hidden_size = self.config.hidden_size self.head_dim = self.hidden_size // self.num_heads self.chunk_size = self.config.conf_attention_chunk_size self.max_future_horizon = self.config.conf_attention_context_right self.max_past_horizon = max(0, self.config.conf_attention_context_left - 1) self.attention_logits_soft_cap = self.config.conf_attention_logit_cap self.context_size = self.chunk_size + self.max_past_horizon + self.max_future_horizon self.relative_position_embedding = Gemma3nAudioRelativePositionEmbedding(config) self.per_dim_scale = nn.Parameter(torch.zeros((self.head_dim,))) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) q_scale = self.head_dim**-0.5 r_softplus_0 = 1.0 / torch.nn.functional.softplus(torch.tensor(0.0)) self.register_buffer("q_scale", (q_scale * r_softplus_0).clone().detach(), persistent=False) lower_causal_mask = torch.tril( torch.ones((self.context_size, self.chunk_size), dtype=torch.bool), diagonal=0, ).T upper_causal_mask = torch.tril( torch.ones((self.chunk_size, self.context_size), dtype=torch.bool), diagonal=self.max_past_horizon + self.max_future_horizon, ) local_causal_valid_mask = torch.ones((self.chunk_size, self.context_size), dtype=torch.bool) local_causal_valid_mask = local_causal_valid_mask * lower_causal_mask * upper_causal_mask self.register_buffer("local_causal_valid_mask", local_causal_valid_mask, persistent=False) self.register_buffer( "softcap", torch.tensor(self.attention_logits_soft_cap).float(), persistent=False, ) def _pad_dim1(self, x: torch.Tensor, pad_left: int, pad_right: int) -> torch.Tensor: batch, _, *tail_shape = x.shape left = x.new_zeros((batch, pad_left, *tail_shape)) right = x.new_zeros((batch, pad_right, *tail_shape)) x = torch.cat([left, x, right], dim=1) return x def _convert_to_block(self, hidden_states: torch.Tensor) -> torch.Tensor: """Turns a sequence to non overlapping blocks. Args: hidden_states: a tensor of [batch, time, ...]. Returns: A tensor of [batch, num_blocks, block_size, ...], with necessary paddings, where output[:, i, ...] are x[:, i*block_size:(i+1)*block_size, ...]. 
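
        Example (illustrative shapes only, assuming the default `conf_attention_chunk_size=12`; not part of the
        upstream docs):

        ```python
        >>> attn = Gemma3nAudioAttention(Gemma3nAudioConfig())
        >>> # a length-30 sequence is zero-padded to 36 and split into 3 non-overlapping blocks of 12
        >>> attn._convert_to_block(torch.randn(1, 30, 8)).shape
        torch.Size([1, 3, 12, 8])
        ```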
""" shape = hidden_states.shape b, t = shape[:2] num_blocks = (t + self.chunk_size - 1) // self.chunk_size if (padding_len := num_blocks * self.chunk_size - t) > 0: hidden_states = self._pad_dim1(hidden_states, 0, padding_len) permute_dims = (b, num_blocks, self.chunk_size) + shape[2:] hidden_states = hidden_states.reshape(permute_dims).contiguous() return hidden_states def _extract_block_context(self, hidden_states: torch.Tensor) -> torch.Tensor: """Extracts temporal context for every block. Args: hidden_states: a tensor of [batch, time, ...]. Returns: A tensor of [batch, num_blocks, context_size, ...], with necessary paddings, where context_size = block_size + left_context + right_context, and output[:, i, ...] are x[:, start-left_context:end+right_context, ...], start = i * block_size, end = (i + 1) * block_size. """ pad_left = self.max_past_horizon # The JAX equivalent padding for signal.frame with pad_mode='valid' is # (left_context, right_context + block_size - 1) on the time dimension. # PyTorch's _pad_dim1 applies padding symmetrically if only one value is given, # or (pad_dim_start, pad_dim_end) if two are given. # Our _pad_dim1(x, pad_left, pad_right) pads dim -2 (time for [B,T,N,H]) # or dim 1 (time for [B,T]). # The current pad_right calculation matches the JAX effective padding. pad_right = self.max_future_horizon + self.chunk_size - 1 hidden_states = self._pad_dim1(hidden_states, pad_left, pad_right) frame_len = self.context_size frame_step = self.chunk_size # Directly use unfold without the subframe_factor logic # x.unfold(dimension, size, step) # dimension=1 (time dimension, assuming x is [B, T_padded, ...]) # size=frame_len (context_size) # step=frame_step (chunk_size) x_unfolded = hidden_states.unfold(dimension=1, size=frame_len, step=frame_step) # If x was [B, T_padded], x_unfolded is [B, num_blocks, frame_len] # If x was [B, T_padded, N, H], x_unfolded is [B, num_blocks, N, H, frame_len] # We want to match JAX's typical output for such operations which might be # [B, num_blocks, frame_len, N, H] if N, H are present. # The relative_position_embedding expects keys as [B, U, C, N, H]. # If x_unfolded is [B, U, N, H, C(frame_len)], we need to move C. if hidden_states.ndim > 2 and x_unfolded.ndim > 3: # Check if inner dimensions (like N, H) exist # Current shape after unfold for [B, T_pad, N, H] is [B, U, N, H, C] # Target shape for keys in RPE: [B, U, C, N, H] x_unfolded = torch.movedim(x_unfolded, source=-1, destination=2) return x_unfolded.contiguous() def forward(self, hidden_states: torch.Tensor, mask: torch.BoolTensor) -> torch.Tensor: # sl.Dense uses jax.numpy.einsum("...a,abcd->...bcd") and jax.numpy.select() qkv_shape = (*hidden_states.shape[:-1], self.num_heads, self.head_dim) query_states = self.q_proj(hidden_states).reshape(qkv_shape).contiguous() key_states = self.k_proj(hidden_states).reshape(qkv_shape).contiguous() value_states = self.v_proj(hidden_states).reshape(qkv_shape).contiguous() per_dim_scale_sp = torch.nn.functional.softplus(self.per_dim_scale) broadcast_shape = (1, 1, 1, self.head_dim) per_dim_scale_sp_broadcast = per_dim_scale_sp.view(broadcast_shape) query_states = query_states * self.q_scale * per_dim_scale_sp_broadcast batch_size, q_time = query_states.shape[:2] query_blocks = self._convert_to_block(query_states) key_blocks = self._extract_block_context(key_states) value_blocks = self._extract_block_context(value_states) num_query_blocks = query_blocks.shape[1] # 1. Create a mask indicating originally valid positions. 
original_valid_mask = ~mask # True for valid, False for padded # 2. Extract blocks from this validity mask. extracted_valid_mask_blocks = self._extract_block_context(original_valid_mask) # If subframe_factor was used in _extract_block_context for a [B, T] input mask, # the shape might be [B, U, C/SF, SF]. Reshape to [B, U, C]. # batch_size and num_query_blocks are known from query_blocks. # self.context_size is C. if ( extracted_valid_mask_blocks.ndim == 4 and extracted_valid_mask_blocks.shape[2] * extracted_valid_mask_blocks.shape[3] == self.context_size ): extracted_valid_mask_blocks = extracted_valid_mask_blocks.reshape( batch_size, num_query_blocks, self.context_size ) # After potential reshape, ensure it's [B, U, C] if it was from a [B,T] mask. # This assertion might be too strict if _extract_block_context handles higher-rank inputs differently, # but for the mask case, this should hold. if extracted_valid_mask_blocks.shape != ( batch_size, num_query_blocks, self.context_size, ): raise ValueError( "Shape of extracted_valid_mask_blocks" f" {extracted_valid_mask_blocks.shape} is not ({batch_size}," f" {num_query_blocks}, {self.context_size}) after potential reshape." ) # 3. Expand dimensions for broadcasting with logits and causal mask. # Target shape for broadcasting with logits [B,N,U,W,C] # extracted_valid_mask_blocks to [B, 1, U, 1, C] condition_from_input_validity = extracted_valid_mask_blocks.unsqueeze(1).unsqueeze(-2) # self.local_causal_valid_mask is [W, C], True where allowed by local window. # Expand to [1, 1, 1, W, C] condition_from_causality = self.local_causal_valid_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0) # 4. Combine the two conditions. # final_condition will be True where a key is *both* originally valid *and* causally accessible. # Broadcasts to [B, 1, U, W, C] final_condition_for_where = torch.logical_and( condition_from_input_validity, condition_from_causality.to(condition_from_input_validity.device), # Ensure same device ) # Embed queries and keys logits = self.relative_position_embedding(query_blocks, key_blocks) # Apply attention logit softcap # Ensure softcap is on the same device as logits softcap_val = self.softcap.to(logits.device) logits = logits / softcap_val logits = torch.tanh(logits) logits = logits * softcap_val # Apply the combined mask. # final_condition_for_where will broadcast with logits [B,N,U,W,C] logits = torch.where(final_condition_for_where, logits, torch.finfo(logits.dtype).min) probabilities = torch.nn.functional.softmax(logits, dim=-1, dtype=torch.float32).to(dtype=value_blocks.dtype) # context_vectors is adapted from jax.numpy.einsum("BNuwc,BucNH->BuwNH", ...) b_dim, n_dim, u_dim, w_dim, c_dim = probabilities.shape h_dim = value_blocks.shape[-1] prob_bun = probabilities.permute(0, 2, 1, 3, 4).reshape(-1, w_dim, c_dim) v_bun = value_blocks.permute(0, 1, 3, 2, 4).reshape(-1, c_dim, h_dim) result_bmm = torch.bmm(prob_bun, v_bun) context_vectors = result_bmm.reshape(b_dim, u_dim, n_dim, w_dim, h_dim).permute(0, 1, 3, 2, 4) context_vectors = context_vectors.reshape( ( batch_size, num_query_blocks * self.chunk_size, self.num_heads, self.head_dim, ) ) context_vectors = context_vectors[:, :q_time] return context_vectors class Gemma3nAudioCumulativeGroupNorm(nn.Module): """Applies Group Normalization cumulatively over the time dimension. This layer normalizes the input by calculating the mean and variance cumulatively over the time dimension (dim 1). 
    The statistics are computed over all feature dimensions (specified by `feature_dims` and `num_channels`). This
    implementation takes no mask argument, so every time step is treated as valid; the all-ones `mask_calc` below
    only keeps the arithmetic in the masked form. Scale, if enabled, is applied per-channel (last dimension).

    This behavior is similar to JAX's `GroupNormalization` with `num_groups=1` and `cumulative=True`.
    """

    def __init__(
        self,
        num_channels: int,  # Number of channels (size of the last dimension)
        feature_dims: Sequence[int],  # Sizes of non-channel feature dimensions, e.g., (H, W) for input [B,T,H,W,C]
        eps: float = 1e-3,
    ):
        super().__init__()
        self.num_channels = num_channels
        self.feature_dims = tuple(feature_dims)
        self.eps = eps

        # Scale parameter depends only on the channel dimension
        self.weight = nn.Parameter(torch.ones(num_channels))

        # Axes for normalization: all dimensions except Batch (0) and Time (1).
        # For input [B, T, *feature_dims, C], these are dims from 2 onwards.
        self.reduction_axes = tuple(range(2, 2 + len(self.feature_dims) + 1))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Applies cumulative group norm.

        Args:
            hidden_states: Input tensor, shape [B, T, *feature_dims, C].

        Returns:
            Normalized tensor with the same shape as the input.
        """
        expected_input_suffix = self.feature_dims + (self.num_channels,)
        if hidden_states.shape[2:] != expected_input_suffix:
            raise ValueError(
                f"Input tensor shape suffix {hidden_states.shape[2:]} does not match expected"
                f" suffix (feature_dims + num_channels) {expected_input_suffix}"
            )

        input_dtype = hidden_states.dtype
        # Calculations are performed in float32 for numerical stability.
        calc_dtype = torch.float32
        x_calc = hidden_states.to(calc_dtype)

        # Prepare a broadcastable validity mask (`mask_calc`). No mask is taken here,
        # so all elements are treated as valid (mask_calc is all ones).
        mask_calc = torch.ones_like(x_calc, dtype=calc_dtype)

        # Cumulative Statistics Calculation
        # 1. Sum of values over reduction axes at each time step.
        sum_values_at_t = torch.sum(x_calc, dim=self.reduction_axes, keepdim=True)
        # 2. Cumulative sum of values over time.
        cum_sum_values = torch.cumsum(sum_values_at_t, dim=1)

        # 3. Count of valid elements in the normalization group at each time step.
        #    (A "group" here consists of all features at a given Batch, Time).
        elements_in_group_at_t = torch.sum(mask_calc, dim=self.reduction_axes, keepdim=True)
        # 4. Cumulative count of valid elements over time.
        cum_count_elements = torch.cumsum(elements_in_group_at_t, dim=1)
        # Avoid division by zero if all preceding elements were masked.
        safe_cum_count_elements = torch.clamp(cum_count_elements, min=1.0)

        # 5. Cumulative mean.
        cum_mean = cum_sum_values / safe_cum_count_elements

        # 6. Sum of squared differences from the cumulative mean at each time step.
        squared_diff_from_mean = (x_calc - cum_mean).pow(2)
        sum_sq_diff_at_t = torch.sum(squared_diff_from_mean, dim=self.reduction_axes, keepdim=True)

        # 7. Cumulative sum of squared differences over time.
        cum_sum_sq_diff = torch.cumsum(sum_sq_diff_at_t, dim=1)

        # 8. Cumulative variance.
cum_variance = cum_sum_sq_diff / safe_cum_count_elements # Normalize the input using the calculated cumulative statistics: # (x - E[x]) / sqrt(Var[x] + eps) normalized_x = (x_calc - cum_mean) * torch.rsqrt(cum_variance + self.eps) # Apply affine transformation (scale and bias) if enabled. # Scale and bias are applied per-channel (last dimension). scale = self.weight.to(calc_dtype) # Reshape for broadcasting: [C] -> [1, ..., 1, C] scale_view_shape = [1] * (hidden_states.dim() - 1) + [self.num_channels] normalized_x = normalized_x * scale.view(scale_view_shape) # Zero out outputs for time steps that were originally masked (where mask_calc is 0). # This ensures padded/invalid positions in the input result in zero output. final_output = normalized_x * mask_calc return final_output.to(input_dtype) class Gemma3nAudioSSCPConvBlock(nn.Module): """A single convolution block for the SubSampleConvProjection. This block consists of a 2D convolution, followed by CumulativeGroupNorm, and a ReLU activation. It handles manual padding for the convolution. """ def __init__( self, config: Gemma3nAudioConfig, idx: int, input_freq_dim: int, # Changed from input_spatial_dim manual_padding: tuple[int, int, int, int] = (0, 0, 0, 0), ): super().__init__() self.config = config self.manual_padding = manual_padding # in_channels is 1 for the first block, or C_out from previous block's conv in_channels = 1 if idx == 0 else self.config.sscp_conv_channel_size[idx - 1] out_channels = self.config.sscp_conv_channel_size[idx] kernel_h, kernel_w = self.config.sscp_conv_kernel_size[idx] stride_h, stride_w = self.config.sscp_conv_stride_size[idx] self.conv = nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=( kernel_h, kernel_w, ), # Kernel (kH, kW) operates on (Time, Freq_dim) stride=(stride_h, stride_w), padding=(0, 0), # Manual padding is used bias=False, ) # Calculate output frequency dimension (f_out_conv) after this convolution. # input_freq_dim is the unpadded width (feature dimension). 
# self.manual_padding is (pad_F_left, pad_F_right, pad_T_top, pad_T_bottom) f_in_padded = input_freq_dim + self.manual_padding[0] + self.manual_padding[1] f_out_conv = (f_in_padded - kernel_w) // stride_w + 1 self.norm = Gemma3nAudioCumulativeGroupNorm( num_channels=out_channels, # Channels of the conv output feature_dims=(f_out_conv,), # The frequency dimension size after conv eps=self.config.sscp_conv_group_norm_eps, ) self.activation = nn.ReLU() def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor: # Input audio_encodings is [B, C_in, T_in, F_in] (e.g., C_in=1) # manual_padding is (pad_F_left, pad_F_right, pad_T_top, pad_T_bottom) # F.pad applies to last two dims: F_in then T_in audio_encodings_padded = F.pad(audio_encodings, self.manual_padding, mode="constant", value=0.0).to( self.conv.weight.dtype ) # Expected padded shape for F_in, k_w=3, pad_F=(1,1) -> F_padded = F_in+2 # Expected padded shape for T_in, k_h=3, pad_T=(0,2) -> T_padded = T_in+2 audio_encodings_conv = self.conv(audio_encodings_padded) # Expected conv output shape: [B, C_out, T_out, F_out] # Input to norm is [B, T_out, F_out, C_out] x_for_norm = audio_encodings_conv.permute(0, 2, 3, 1).contiguous() x_normed = self.norm(x_for_norm) # Output of norm is [B, T_out, F_out, C_out], permute back to [B, C_out, T_out, F_out] audio_encodings_normed = x_normed.permute(0, 3, 1, 2).contiguous() return self.activation(audio_encodings_normed) class Gemma3nAudioSubSampleConvProjection(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config current_f_for_block_input = config.input_feat_size # Start with original feature dim calculated_block_padding = [] calculated_f_out_dims = [] # Tracking frequency dimension output sizes for i in range(2): # Assuming 2 conv layers as per sscp_conv_... arrays kernel_h, kernel_w = config.sscp_conv_kernel_size[i] stride_h, stride_w = config.sscp_conv_stride_size[i] # Padding for Time (Height for Conv2d) - REVERSE_CAUSAL like # JAX 'reverse_causal' padding is (0, kernel_size - 1) pad_t_top = 0 pad_t_bottom = kernel_h - 1 # Frequency Padding (Width for Conv2d) # Based on JAX effective padding (1,1) for F_in=10, K_w=3, S_w=2 # and the successful test configuration. # If kernel/stride/input_freq for frequency changes, this might need re-evaluation # to match generic JAX 'SAME' behavior if it differs. pad_f_left = 1 pad_f_right = 1 manual_padding_tuple = ( pad_f_left, pad_f_right, pad_t_top, pad_t_bottom, ) calculated_block_padding.append(manual_padding_tuple) # Calculate output frequency dimension after this convolution # This uses the actual padding applied and kernel/stride. 
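            # (Illustrative arithmetic for the defaults, kernel_w=3, stride_w=2, pad_F=(1, 1):
            # layer 0 maps F = 128 -> (128 + 2 - 3) // 2 + 1 = 64 and layer 1 maps 64 -> 32, so the
            # final linear projection below sees 32 channels * 32 frequency bins = 1024 input features.)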
f_in_padded = current_f_for_block_input + pad_f_left + pad_f_right f_out_after_conv = (f_in_padded - kernel_w) // stride_w + 1 # Assuming dilation_w = 1 calculated_f_out_dims.append(f_out_after_conv) current_f_for_block_input = f_out_after_conv self.conv_0 = Gemma3nAudioSSCPConvBlock( idx=0, input_freq_dim=config.input_feat_size, # Pass original feature dim config=config, manual_padding=calculated_block_padding[0], ) self.conv_1 = Gemma3nAudioSSCPConvBlock( idx=1, input_freq_dim=calculated_f_out_dims[0], # Output freq dim from conv_0 config=config, manual_padding=calculated_block_padding[1], ) final_c_out = config.sscp_conv_channel_size[-1] final_f_out = calculated_f_out_dims[-1] # Final frequency dimension self.input_proj_in_features = final_c_out * final_f_out self.input_proj_linear = nn.Linear(self.input_proj_in_features, self.config.hidden_size, bias=False) def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor: # audio_encodings is [B, T, F_in] # Reshape to [B, 1, T, F_in] (Batch, Channels=1, Height=Time, Width=F_in) audio_encodings_reshaped = audio_encodings.unsqueeze(1) x = self.conv_0(audio_encodings_reshaped) x = self.conv_1(x) # x from conv_1 is [B, C_out_1, T_out_1, F_out_1] b, c_out, t_out, f_out = x.shape # Permute to [B, T_out_1, F_out_1, C_out_1] then flatten F_out_1 and C_out_1 x_permuted = x.permute(0, 2, 3, 1).contiguous() output_flattened = x_permuted.view(b, t_out, f_out * c_out) output = self.input_proj_linear(output_flattened) return output class Gemma3nAudioConformerAttention(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.post_in_features = self.config.hidden_size self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False) self.pre_attn_norm = Gemma3nRMSNorm(self.config.hidden_size) self.attn = Gemma3nAudioAttention(config) self.post = nn.Linear(self.post_in_features, self.config.hidden_size, bias=False) self.post_norm = Gemma3nRMSNorm(self.config.hidden_size) def forward(self, audio_encodings: torch.Tensor, audio_mel_mask: torch.BoolTensor) -> torch.Tensor: audio_encodings_input_to_attn = audio_encodings audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) audio_encodings_norm = self.pre_attn_norm(audio_encodings) # Output of self.attn is [B, T, NumHeads, HeadDim] audio_encodings_attn_out = self.attn(audio_encodings_norm, audio_mel_mask) # Reshape from [B, T, NumHeads, HeadDim] to [B, T, NumHeads * HeadDim] # NumHeads * HeadDim = hidden_size b, t, num_heads, head_dim = audio_encodings_attn_out.shape audio_encodings_reshaped = audio_encodings_attn_out.reshape(b, t, num_heads * head_dim) audio_encodings = self.post(audio_encodings_reshaped) audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) return audio_encodings_input_to_attn + self.post_norm(audio_encodings) class Gemma3nAudioConformerFeedForward(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False) self.pre_layer_norm = Gemma3nRMSNorm(self.config.hidden_size) self.ffw_layer_1 = nn.Linear(self.config.hidden_size, self.config.hidden_size * 4, bias=False) self.ffw_layer_2 = nn.Linear(self.config.hidden_size * 4, self.config.hidden_size, bias=False) self.post_layer_norm = Gemma3nRMSNorm(self.config.hidden_size) self.post_layer_scale = 
torch.tensor(self.config.conf_residual_weight) def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor: residual = audio_encodings audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) audio_encodings = self.pre_layer_norm(audio_encodings) audio_encodings: torch.Tensor = self.ffw_layer_1(audio_encodings) audio_encodings = nn.functional.silu(audio_encodings) audio_encodings: torch.Tensor = self.ffw_layer_2(audio_encodings) audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) audio_encodings = self.post_layer_norm(audio_encodings) return residual + (audio_encodings * self.post_layer_scale) class Gemma3nAudioConformerLightConv1d(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.pre_layer_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps) self.linear_start = nn.Linear(self.config.hidden_size, self.config.hidden_size * 2, bias=False) self.depthwise_conv1d = nn.Conv1d( in_channels=self.config.hidden_size, out_channels=self.config.hidden_size, kernel_size=self.config.conf_conv_kernel_size, stride=1, padding=0, # Manual causal padding groups=self.config.hidden_size, # Depthwise bias=False, ) self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False) self.conv_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps) self.linear_end = nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False) self.causal_padding = self.config.conf_conv_kernel_size - 1 def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor: audio_encodings_residual = audio_encodings # Save for residual connection audio_encodings = self.pre_layer_norm(audio_encodings) audio_encodings = self.linear_start(audio_encodings) audio_encodings = torch.nn.functional.glu(audio_encodings, dim=-1) # Permute for Conv1d: [B, T, D] -> [B, D, T] audio_encodings_permuted = audio_encodings.permute(0, 2, 1) # Apply manual causal padding audio_encodings_permuted_padded = F.pad(audio_encodings_permuted, (self.causal_padding, 0)) audio_encodings = self.depthwise_conv1d(audio_encodings_permuted_padded) # Permute back: [B, D, T_out] -> [B, T_out, D] audio_encodings = audio_encodings.permute(0, 2, 1) audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) audio_encodings = self.conv_norm(audio_encodings) audio_encodings = nn.functional.silu(audio_encodings) audio_encodings = self.linear_end(audio_encodings) output = audio_encodings + audio_encodings_residual return output class Gemma3nAudioConformerBlock(nn.Module): def __init__(self, config: Gemma3nAudioConfig): super().__init__() self.config = config self.ffw_layer_start = Gemma3nAudioConformerFeedForward(self.config) self.attention = Gemma3nAudioConformerAttention(self.config) self.lconv1d = Gemma3nAudioConformerLightConv1d(self.config) self.ffw_layer_end = Gemma3nAudioConformerFeedForward(self.config) self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False) self.norm = Gemma3nRMSNorm(self.config.hidden_size) def forward(self, audio_encodings: torch.Tensor, audio_mel_mask: torch.BoolTensor) -> torch.Tensor: audio_encodings = self.ffw_layer_start(audio_encodings) audio_encodings = self.attention(audio_encodings, audio_mel_mask) validity_mask_for_lconv = ~audio_mel_mask # True for valid audio_encodings_for_lconv_input = audio_encodings * 
validity_mask_for_lconv.unsqueeze(-1).to( audio_encodings.dtype ) audio_encodings = self.lconv1d(audio_encodings_for_lconv_input) audio_encodings = self.ffw_layer_end(audio_encodings) audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping) output = self.norm(audio_encodings) return output class Gemma3nAudioEncoder(PreTrainedModel): """An audio encoder based on the [Universal Speech Model](https://arxiv.org/abs/2303.01037) architecture.""" config: Gemma3nAudioConfig main_input_name = "audio_mel" def __init__(self, config: Gemma3nAudioConfig): super().__init__(config) self.config = config self.subsample_conv_projection = Gemma3nAudioSubSampleConvProjection(config) self.conformer = nn.ModuleList( [Gemma3nAudioConformerBlock(config) for _ in range(config.conf_num_hidden_layers)] ) def forward( self, audio_mel: torch.Tensor, audio_mel_mask: torch.BoolTensor ) -> tuple[torch.Tensor, torch.BoolTensor]: """Encodes a batch of MELs. Args: audio_mel: a torch.Tensor of shape [batch, num_frames, num_channels, mel_bins]. Returns: audio_encodings: a torch.Tensor of shape `[batch_size, self.config.audio_soft_tokens_per_image, self.config.audio_config.hidden_size]` audio_mel_mask: a torch.BoolTensor of shape [batch, num_frames]. """ audio_encodings = self.subsample_conv_projection(audio_mel) # audio_encodings: [B, T_sub, D] # Subsample the input audio_mel_mask to match the time dimension of audio_encodings (T_sub) t_sub = audio_encodings.shape[1] time_stride_product = 1 for stride_pair_idx in range(len(self.config.sscp_conv_stride_size)): time_stride_product *= self.config.sscp_conv_stride_size[stride_pair_idx][0] # Create indices for gathering from the original mask. # These indices map to original time steps corresponding to the start of each # receptive field in the subsampled output. indices = torch.arange(t_sub, device=audio_mel_mask.device) * time_stride_product indices = torch.clamp(indices, max=audio_mel_mask.shape[1] - 1) # Ensure indices are valid # Expand indices for batch compatibility if B > 1 and indices is 1D. 
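        # (Illustrative numbers: with the default sscp strides ((2, 2), (2, 2)), time_stride_product = 4, so a
        # 32-frame mel input subsampled to T_sub = 8 gathers the mask at indices [0, 4, 8, ..., 28], i.e. the
        # start of each receptive field.)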
if audio_mel_mask.ndim > 1 and indices.ndim == 1: indices = indices.unsqueeze(0).expand(audio_mel_mask.shape[0], -1) # [B, T_sub] elif ( audio_mel_mask.ndim == indices.ndim and audio_mel_mask.shape[0] == 1 and indices.shape[0] != 1 and t_sub == indices.shape[0] ): # Handle case where B=1 but indices became [T_sub] instead of [1, T_sub] indices = indices.unsqueeze(0) current_mask = torch.gather(audio_mel_mask, 1, indices) # [B, T_sub] for block in self.conformer: audio_encodings = block(audio_encodings, current_mask) # Pass the processed mask if self.config.conf_reduction_factor > 1: audio_encodings = audio_encodings[:, :: self.config.conf_reduction_factor] # Reduce the mask as well current_mask = current_mask[:, :: self.config.conf_reduction_factor] audio_encodings = audio_encodings.masked_fill(current_mask.unsqueeze(-1), 0.0) return audio_encodings, current_mask # ==== Language Model ==== class Gemma3nTextScaledWordEmbedding(Gemma3TextScaledWordEmbedding): pass class Gemma3nTextLaurelBlock(nn.Module): """Learned Augmented Residual Layer""" def __init__(self, config: Gemma3nTextConfig): super().__init__() self.config = config self.linear_left = nn.Linear(self.config.hidden_size, self.config.laurel_rank, bias=False) self.linear_right = nn.Linear(self.config.laurel_rank, self.config.hidden_size, bias=False) self.post_laurel_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: laurel_hidden_states: torch.Tensor = self.linear_left(hidden_states) laurel_hidden_states: torch.Tensor = self.linear_right(laurel_hidden_states) normed_laurel_hidden_states = self.post_laurel_norm(laurel_hidden_states) return hidden_states + normed_laurel_hidden_states class Gemma3nTextMLP(Gemma2MLP): def __init__(self, config: Gemma3nTextConfig, layer_idx: int = 0): super().__init__(config) self.intermediate_size = config.intermediate_size[layer_idx] self.activation_sparsity = config.activation_sparsity_pattern[layer_idx] def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: gate_proj = self.gate_proj(hidden_states) if self.activation_sparsity > 0.0: gate_proj = self._gaussian_topk(gate_proj) activations = self.act_fn(gate_proj) up_proj = self.up_proj(hidden_states) down_proj = self.down_proj(activations * up_proj) return down_proj def _gaussian_topk(self, inputs: torch.Tensor) -> torch.Tensor: target_sparsity_tensor = torch.tensor(self.activation_sparsity, dtype=torch.float32, device=inputs.device) # normal_dist and std_multiplier are adapted from jax.scipy.stats.norm.ppf(). # # References: # * https://docs.jax.dev/en/latest/_autosummary/jax.scipy.stats.norm.ppf.html # * https://pytorch.org/docs/stable/distributions.html#torch.distributions.normal.Normal # * https://pytorch.org/docs/stable/distributions.html#torch.distributions.transformed_distribution.TransformedDistribution.icdf normal_dist = torch.distributions.normal.Normal(0, 1) std_multiplier: torch.Tensor = normal_dist.icdf(target_sparsity_tensor) std_multiplier = std_multiplier.type(inputs.dtype) inputs_mean = torch.mean(inputs, dim=-1, keepdim=True) inputs_std = torch.std(inputs, dim=-1, keepdim=True, unbiased=False) cutoff_x = inputs_mean + inputs_std * std_multiplier return nn.functional.relu(inputs - cutoff_x) class Gemma3nTextAltUp(nn.Module): """Alternating Updates (AltUp) The AltUp module wraps transformer layers. 
    The `predict` step modifies the input to the transformer layer, and the `correct` step propagates the output of
    the transformer layer to the sparsely updated dimensions.

    See more in the research paper:

    https://proceedings.neurips.cc/paper_files/paper/2023/file/f2059277ac6ce66e7e5543001afa8bb5-Paper-Conference.pdf
    """

    def __init__(self, config: Gemma3nTextConfig):
        super().__init__()
        self.config = config
        self.correct_output_scale = nn.Parameter(torch.zeros(self.config.hidden_size))
        self.correction_coefs = nn.Linear(self.config.altup_num_inputs, self.config.altup_num_inputs, bias=False)
        self.prediction_coefs = nn.Linear(self.config.altup_num_inputs, self.config.altup_num_inputs**2, bias=False)
        self.modality_router = nn.Linear(self.config.hidden_size, self.config.altup_num_inputs, bias=False)
        self.router_norm = Gemma3nRMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps)
        self.register_buffer("router_input_scale", torch.tensor(self.config.hidden_size**-1.0), persistent=False)

    def compute_router_modalities(self, x: torch.Tensor) -> torch.Tensor:
        router_inputs = self.router_norm(x) * self.router_input_scale
        routed = self.modality_router(router_inputs)
        return torch.tanh(routed.float()).type_as(x)

    def predict(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Predicts the output of a layer using a trainable map.

        Args:
            hidden_states: A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` derived by
                stacking the input embeddings and preprocessing the last `num_altup_inputs - 1` matrices.

        Returns:
            A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` containing the
            predictions.
        """
        modalities = self.compute_router_modalities(hidden_states[self.config.altup_active_idx])

        if self.training and self.config.altup_coef_clip is not None:
            self.prediction_coefs.weight.data.clamp_(-self.config.altup_coef_clip, self.config.altup_coef_clip)

        # Project and then transpose all 2D matrices contained so that matmul gives the correct result
        all_coefs: torch.Tensor = (
            self.prediction_coefs(modalities)
            .reshape(*modalities.shape[:-1], self.config.altup_num_inputs, self.config.altup_num_inputs)
            .permute(0, 1, 3, 2)
        )

        # permute hidden_states to [batch_size, num_tokens, hidden_size, altup_num_inputs]
        predictions = torch.matmul(hidden_states.permute(1, 2, 3, 0), all_coefs)
        predictions = predictions.permute(3, 0, 1, 2)  # undo the permute
        predictions += hidden_states  # add the original input
        return predictions.contiguous().type_as(hidden_states)

    def correct(self, predictions: torch.Tensor, activated: torch.Tensor) -> torch.Tensor:
        """Corrects the predictions relative to the activated inputs.

        Args:
            predictions: A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` derived by
                stacking the input embeddings and preprocessing the last `num_altup_inputs - 1` matrices.
            activated: A 3D tensor of shape `[batch_size, num_tokens, hidden_size]` containing the activated inputs.

        Returns:
            A 4D tensor of shape `[num_altup_inputs, batch_size, num_tokens, hidden_size]` correcting the original
            predictions relative to the activated input embeddings.
""" modalities = self.compute_router_modalities(activated) innovation = activated - predictions[self.config.altup_active_idx] # (batch, num_tokens, hidden_size) innovation = innovation.repeat(self.config.altup_num_inputs, 1, 1, 1) # Repeat on dim0 to match predictions if self.config.altup_coef_clip is not None: self.correction_coefs.weight.data.clamp_(-self.config.altup_coef_clip, self.config.altup_coef_clip) # all_coefs adapted from jax.numpy.einsum("...p,pi->...i", ...) # Permute to (altup_num_inputs, batch_size, num_tokens) as the last dim is a scalar applied to each altup input # and expand on dim1 for broadcastability all_coefs: torch.Tensor = self.correction_coefs(modalities) + 1.0 all_coefs = all_coefs.permute(2, 0, 1).unsqueeze(-1) corrected = torch.mul(innovation, all_coefs) corrected += predictions # add the original input return corrected.contiguous().type_as(activated) def forward(self, corrected: torch.Tensor) -> torch.Tensor: """ This is only defined as the `forward` so that accelerate hooks can move correctly `correct_output_scale` (which is a nn.Parameter, not a Module) between devices when offloading. It is otherwise only used in `scale_corrected_output` """ return (corrected.type_as(self.correct_output_scale) * self.correct_output_scale).type_as(corrected) def scale_corrected_output(self, corrected: torch.Tensor) -> torch.Tensor: """Scales the provided 3D tensor of shape [batch_size, num_tokens, hidden_size].""" return self.forward(corrected) class Gemma3nTextRotaryEmbedding(Gemma2RotaryEmbedding): pass def apply_rotary_pos_emb( x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, position_ids: Optional[torch.Tensor] = None, unsqueeze_dim: int = 1, ): """Applies Rotary Position Embedding to the query and key tensors. Args: x (`torch.Tensor`): The tensor to embed. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) return (x * cos) + (rotate_half(x) * sin) class Gemma3nTextAttention(Gemma3Attention): def __init__(self, config: Gemma3nTextConfig, layer_idx: int): super().__init__() del self.attn_logit_softcapping del self.scaling self.v_norm = Gemma3nRMSNorm(dim=config.head_dim, eps=config.rms_norm_eps, with_scale=False) first_kv_shared_layer_idx = self.config.num_hidden_layers - self.config.num_kv_shared_layers self.is_kv_shared_layer = layer_idx >= first_kv_shared_layer_idx > 0 # Find the index of the last sliding or full layer before sharing starts (or None if no sharing) layer_type = config.layer_types[layer_idx] self.kv_shared_layer_index = ( first_kv_shared_layer_idx - 1 - config.layer_types[first_kv_shared_layer_idx - 1 :: -1].index(layer_type) if self.is_kv_shared_layer else None ) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.config.head_dim) cos, sin = position_embeddings query_states = self.q_proj(hidden_states).view(hidden_shape) query_states = self.q_norm(query_states) query_states = apply_rotary_pos_emb(query_states, cos, sin, unsqueeze_dim=2) query_states = query_states.transpose(1, 2) if self.is_kv_shared_layer and self.kv_shared_layer_index is not None and past_key_values is not None: # In this case we need special handling of the slice as the layer is of fixed small size (for full layers, we never go beyond) layer = past_key_values.layers[self.kv_shared_layer_index] # Device of past layer may be different from current one indices = cache_position.to(layer.keys.device) # Sliding window cache layers might have smaller size (for full layers, we never go beyond) if isinstance(layer, SlidingWindowLayer): if cache_position.shape[0] > layer.get_max_cache_shape(): indices = slice(0, layer.get_max_cache_shape()) else: indices = indices.clamp(min=0, max=layer.get_max_cache_shape() - 1) # Device of past layer may be different from current one key_states = layer.keys[:, :, indices].to(query_states.device) value_states = layer.values[:, :, indices].to(query_states.device) else: key_states = self.k_proj(hidden_states).view(hidden_shape) key_states = self.k_norm(key_states) key_states = apply_rotary_pos_emb(key_states, cos, sin, unsqueeze_dim=2) key_states = key_states.transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape) value_states = self.v_norm(value_states) value_states = value_states.transpose(1, 2) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = { "sin": sin, "cos": cos, "cache_position": cache_position, "sliding_window": self.sliding_window, } key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=self.attention_dropout if 
self.training else 0.0, scaling=1.0, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Gemma3nTextDecoderLayer(Gemma3DecoderLayer): def __init__(self, config: Gemma3nTextConfig, layer_idx: int): super().__init__(config, layer_idx) self.mlp = Gemma3nTextMLP(config, layer_idx=layer_idx) self.hidden_size_per_layer_input = config.hidden_size_per_layer_input self.act_fn = ACT2FN[config.hidden_activation] self.altup = Gemma3nTextAltUp(config) self.laurel = Gemma3nTextLaurelBlock(config) self.self_attn = Gemma3nTextAttention(config, layer_idx) self.per_layer_input_gate = nn.Linear(self.hidden_size, self.hidden_size_per_layer_input, bias=False) self.per_layer_projection = nn.Linear(self.hidden_size_per_layer_input, self.hidden_size, bias=False) self.post_per_layer_input_norm = Gemma3nRMSNorm(self.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings_global: torch.Tensor, position_embeddings_local: torch.Tensor, per_layer_input: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: predictions = self.altup.predict(hidden_states) active_prediction = predictions[self.config.altup_active_idx] active_prediction_normed = self.input_layernorm(active_prediction) laurel_output = self.laurel(active_prediction_normed) # apply global RoPE to non-sliding layer only if self.self_attn.is_sliding: position_embeddings = position_embeddings_local else: position_embeddings = position_embeddings_global attn, self_attn_weights = self.self_attn( hidden_states=active_prediction_normed, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) attn = self.post_attention_layernorm(attn) attn_gated = active_prediction + attn attn_laurel = (attn_gated + laurel_output) / math.sqrt(2) attn_norm = self.pre_feedforward_layernorm(attn_laurel) attn_ffw = self.mlp(attn_norm) attn_ffw_norm = self.post_feedforward_layernorm(attn_ffw) attn_ffw_laurel_gated = attn_laurel + attn_ffw_norm corrected_predictions = self.altup.correct(predictions, attn_ffw_laurel_gated) first_prediction = corrected_predictions[self.config.altup_active_idx].clone() if self.config.altup_correct_scale: first_prediction = self.altup.scale_corrected_output(first_prediction) # per_layer_input_gate adapted from jax.numpy.einsum("btd,dp->btp", ...) first_prediction = self.per_layer_input_gate(first_prediction) first_prediction = self.act_fn(first_prediction) first_prediction = torch.multiply(first_prediction, per_layer_input) # per_layer_projection adapted from jax.numpy.einsum("btp,pd->btd", ...) 
first_prediction = self.per_layer_projection(first_prediction) first_prediction = self.post_per_layer_input_norm(first_prediction) corrected_predictions[1:] += first_prediction outputs = (corrected_predictions,) if output_attentions: outputs += (self_attn_weights,) return outputs class Gemma3nPreTrainedModel(Gemma2PreTrainedModel): config: Gemma3nConfig base_model_prefix = "" _no_split_modules = ["Gemma3nTextDecoderLayer"] def _init_weights(self, module): Gemma2PreTrainedModel._init_weights(self, module) if isinstance(module, Gemma3nAudioCumulativeGroupNorm): module.weight.data.fill_(1.0) elif isinstance(module, Gemma3nAudioAttention): module.per_dim_scale.data.zero_() elif isinstance(module, Gemma3nTextAltUp): module.correct_output_scale.data.zero_() @auto_docstring(custom_intro="The base Gemma 3n language model without a language modeling head.") class Gemma3nTextModel(Gemma3TextModel): config: Gemma3nTextConfig def __init__(self, config: Gemma3nTextConfig): super().__init__(config) self.hidden_size = config.hidden_size self.hidden_size_per_layer_input = config.hidden_size_per_layer_input self.embed_tokens_per_layer = Gemma3nTextScaledWordEmbedding( config.vocab_size_per_layer_input, config.num_hidden_layers * config.hidden_size_per_layer_input, self.padding_idx, embed_scale=config.hidden_size_per_layer_input**0.5, ) self.per_layer_model_projection = nn.Linear( self.hidden_size, config.num_hidden_layers * config.hidden_size_per_layer_input, bias=False, ) self.per_layer_projection_norm = Gemma3nRMSNorm(config.hidden_size_per_layer_input, eps=config.rms_norm_eps) self.layers = nn.ModuleList( [Gemma3nTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Gemma3nRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.altup_projections = nn.ModuleList( [nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)] ) self.altup_unembed_projections = nn.ModuleList( [nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)] ) self.register_buffer("per_layer_projection_scale", torch.tensor(self.hidden_size**-0.5), persistent=False) self.register_buffer("per_layer_input_scale", torch.rsqrt(torch.tensor(2.0)), persistent=False) self.rotary_emb = Gemma3nTextRotaryEmbedding(config=config) # TODO (raushan): Fix this after RoPE refactor. For now we hack it by # reassigning thetas when we want to create a local RoPE layer. Config # defaults should hold values for global RoPE. 
config = copy.deepcopy(config) config.rope_theta = config.rope_local_base_freq config.rope_scaling = {"rope_type": "default"} self.rotary_emb_local = Gemma3nTextRotaryEmbedding(config=config) def get_per_layer_inputs(self, input_ids: torch.LongTensor) -> torch.Tensor: return self.embed_tokens_per_layer(input_ids).reshape( *input_ids.shape, self.config.num_hidden_layers, self.hidden_size_per_layer_input, ) def project_per_layer_inputs( self, inputs_embeds: torch.Tensor, per_layer_inputs: Optional[torch.Tensor] = None, ) -> torch.Tensor: per_layer_projection: torch.Tensor = self.per_layer_model_projection(inputs_embeds) per_layer_projection *= self.per_layer_projection_scale.to( dtype=inputs_embeds.dtype, device=per_layer_projection.device ) per_layer_projection = per_layer_projection.reshape( *inputs_embeds.shape[:-1], self.config.num_hidden_layers, self.hidden_size_per_layer_input, ) per_layer_projection = self.per_layer_projection_norm(per_layer_projection) if per_layer_inputs is None: return per_layer_projection if per_layer_projection.shape != per_layer_inputs.shape: # per-layer inputs are sometimes padded with zeros, slice the relevant embeddings. per_layer_inputs = per_layer_inputs[..., : self.config.num_hidden_layers, :] return (per_layer_projection + per_layer_inputs) * self.per_layer_input_scale.to( dtype=inputs_embeds.dtype, device=per_layer_projection.device ) @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, per_layer_inputs: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: r""" per_layer_inputs (torch.Tensor, *optional*, defaults to None): Pre-computed per-layer embeddings. If None, they are derived from input_ids if provided. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if input_ids is not None: inputs_embeds = self.embed_tokens(input_ids) per_layer_inputs = self.get_per_layer_inputs(input_ids) per_layer_inputs = self.project_per_layer_inputs(inputs_embeds, per_layer_inputs) if use_cache and past_key_values is None and not self.training: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device, ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } # embed positions hidden_states_0 = inputs_embeds # Initialize RoPE embeddings position_embeddings_global = self.rotary_emb(hidden_states_0, position_ids) position_embeddings_local = self.rotary_emb_local(hidden_states_0, position_ids) # Expand hidden_states to support per-layer inputs target_magnitude = torch.mean(hidden_states_0**2, dim=-1, keepdim=True) ** 0.5 epsilon_tensor = torch.tensor(1e-5) temp_hidden_states = [hidden_states_0] for i in range(1, self.config.altup_num_inputs): # altup_proj adapted from jax.numpy.einsum("btp,pd->btd", ...) altup_proj = self.altup_projections[i - 1](hidden_states_0) current_hidden_state = altup_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device) new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True) new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device))) current_hidden_state = current_hidden_state * target_magnitude / new_magnitude temp_hidden_states.append(current_hidden_state) hidden_states = torch.stack(temp_hidden_states, dim=0) # [num_altup_inputs, batch, seq_len, hidden_size] # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers[: self.config.num_hidden_layers]: if output_hidden_states: all_hidden_states += (hidden_states,) causal_mask = causal_mask_mapping[decoder_layer.attention_type] per_layer_input = per_layer_inputs[:, :, decoder_layer.layer_idx, :] layer_outputs = decoder_layer( hidden_states, position_embeddings_global, position_embeddings_local, per_layer_input, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) # add hidden states from the last decoder layer (but before reprojecting to stay consistent with layer output) if output_hidden_states: all_hidden_states += (hidden_states,) # Per-layer inputs to single output target_magnitude = torch.mean(hidden_states[0] ** 2, dim=-1, keepdim=True) ** 0.5 temp_hidden_states = [hidden_states[0]] for i in range(1, self.config.altup_num_inputs): # altup_unembed_projections adapted from jax.numpy.einsum("btp,pd->btd", ...) 
altup_unemb_proj: torch.Tensor = self.altup_unembed_projections[i - 1](hidden_states[i]) current_hidden_state = altup_unemb_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device) new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True) new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device))) current_hidden_state = current_hidden_state * target_magnitude / new_magnitude temp_hidden_states.append(current_hidden_state) hidden_states = torch.stack(temp_hidden_states) hidden_states = torch.mean(hidden_states, dim=0) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring(custom_intro="The base Gemma 3n language model with a language modeling head.") class Gemma3nForCausalLM(Gemma3ForCausalLM): _checkpoint_conversion_mapping = {"model.language_model": "model"} base_model_prefix = "model" class Gemma3nMultimodalEmbedder(nn.Module): """Embeds token ids or soft tokens for multimodal content into language model space.""" def __init__( self, multimodal_config: Union[Gemma3nAudioConfig, Gemma3nVisionConfig], text_config: Gemma3nTextConfig, ): super().__init__() self.multimodal_hidden_size = multimodal_config.hidden_size self.eps = multimodal_config.rms_norm_eps self.vocab_offset = multimodal_config.vocab_offset self.vocab_size = multimodal_config.vocab_size self.text_hidden_size = text_config.hidden_size self.embedding = nn.Embedding(self.vocab_size, self.multimodal_hidden_size) self.hard_embedding_norm = Gemma3nRMSNorm(self.multimodal_hidden_size, eps=self.eps) self.soft_embedding_norm = Gemma3nRMSNorm(self.multimodal_hidden_size, eps=self.eps) self.embedding_projection = nn.Linear(self.multimodal_hidden_size, self.text_hidden_size, bias=False) self.embedding_post_projection_norm = Gemma3nRMSNorm(self.text_hidden_size, eps=self.eps, with_scale=False) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Embeds token ids or soft tokens for multimodal content into language model space. Args: input_ids: A torch.LongTensor containing the token ids to embed. Values should be in the range `[vocab_offset, vocab_offset + vocab_size)`. inputs_embeds: A torch.Tensor containing the soft tokens to embed. Returns: A torch.Tensor of embeddings with shape `[batch_size, seq_len, self.config.text_config.hidden_size]`. """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is not None: emb_norm = self.soft_embedding_norm(inputs_embeds) else: hard_emb = self.embedding(input_ids - self.vocab_offset) emb_norm = self.hard_embedding_norm(hard_emb) emb_norm_proj = self.embedding_projection(emb_norm) return self.embedding_post_projection_norm(emb_norm_proj) @auto_docstring( custom_intro=""" The base Gemma 3n model comprising a vision backbone, an audio backbone, and a language model without a language modeling head. 
""" ) class Gemma3nModel(PaliGemmaModel): _checkpoint_conversion_mapping = {} def __init__(self, config: Gemma3nConfig): super().__init__() del self.multi_modal_projector # Replaced by Gemma3nVisionEmbedder self.vocab_size_per_layer_input = config.text_config.vocab_size_per_layer_input self.audio_tower = AutoModel.from_config(config.audio_config) self.embed_vision = Gemma3nMultimodalEmbedder(config.vision_config, config.text_config) self.embed_audio = Gemma3nMultimodalEmbedder(config.audio_config, config.text_config) def get_image_features(self, pixel_values: torch.Tensor) -> torch.Tensor: """ Projects the last hidden state from the vision model into language model space. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`) The tensors corresponding to the input images. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). """ vision_outputs = self.vision_tower( pixel_values=pixel_values, do_pooling=False, return_dict=True ).last_hidden_state # Convert from (batch, channels, height, width) to (batch, height * width, channels) where: # height == width and height * width == Gemma3nConfig.vision_soft_tokens_per_image. vision_outputs = vision_outputs.reshape( vision_outputs.shape[0], self.config.vision_config.hidden_size, self.config.vision_soft_tokens_per_image, ).permute(0, 2, 1) # Normalize and embed the soft tokens into language model space. vision_outputs *= self.config.vision_config.hidden_size**0.5 return self.embed_vision(inputs_embeds=vision_outputs) def get_placeholder_mask( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_features: Optional[torch.FloatTensor] = None, audio_features: Optional[torch.FloatTensor] = None, ): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. 
""" if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) special_audio_mask = ( inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device) ) ).all(-1) else: special_image_mask = input_ids == self.config.image_token_id special_audio_mask = input_ids == self.config.audio_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0] * image_features.shape[1]}" ) n_audio_tokens = special_audio_mask.sum() special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) if audio_features is not None and inputs_embeds[special_audio_mask].numel() != audio_features.numel(): raise ValueError( f"Audio features and image tokens do not match: tokens: {n_audio_tokens}, features {audio_features.shape[0] * audio_features.shape[1]}" ) return special_image_mask, special_audio_mask @can_return_tuple def forward( self, input_ids: Optional[torch.LongTensor] = None, # text inputs pixel_values: Optional[torch.FloatTensor] = None, # vision inputs input_features: Optional[torch.FloatTensor] = None, # audio inputs attention_mask: Optional[torch.Tensor] = None, input_features_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, token_type_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, **lm_kwargs, ) -> Gemma3nCausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Gemma3nForConditionalGeneration >>> model = Gemma3nForConditionalGeneration.from_pretrained("google/gemma3n2-3b-mix-224") >>> processor = AutoProcessor.from_pretrained("google/gemma3n2-3b-mix-224") >>> prompt = "Where is the cat standing?" 
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs,) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Where is the cat standing?\nsnow" ``` """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if input_ids is not None: inputs_embeds = self.get_input_embeddings()(input_ids) # Prepare per-layer inputs from inputs_ids per_layer_inputs_mask = torch.logical_and(input_ids >= 0, input_ids < self.vocab_size_per_layer_input) per_layer_inputs_tokens = torch.where(per_layer_inputs_mask, input_ids, torch.zeros_like(input_ids)) per_layer_inputs = self.language_model.get_per_layer_inputs(per_layer_inputs_tokens) # Handle vision tokens (>= embed_vision.vocab_offset and < embed_audio.vocab_offset) vision_mask = torch.logical_and( input_ids >= self.embed_vision.vocab_offset, input_ids < self.embed_audio.vocab_offset ) dummy_vision_token_id = self.embed_vision.vocab_offset + self.embed_vision.vocab_size - 1 vision_input_ids = torch.where(vision_mask, input_ids, dummy_vision_token_id).to(inputs_embeds.device) vision_embeds = self.embed_vision(input_ids=vision_input_ids) expanded_vision_mask = vision_mask.unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds = torch.where(expanded_vision_mask, vision_embeds, inputs_embeds) # Handle audio tokens (>= embed_audio.vocab_offset) audio_mask = input_ids >= self.embed_audio.vocab_offset dummy_audio_token_id = self.embed_audio.vocab_offset + self.embed_audio.vocab_size - 1 audio_input_ids = torch.where(audio_mask, input_ids, dummy_audio_token_id).to(inputs_embeds.device) audio_embeds = self.embed_audio(input_ids=audio_input_ids) expanded_audio_mask = audio_mask.unsqueeze(-1).expand_as(inputs_embeds) inputs_embeds = torch.where(expanded_audio_mask, audio_embeds, inputs_embeds) else: per_layer_inputs = None # Merge text and images if pixel_values is not None: image_features = self.get_image_features(pixel_values) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask, _ = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) # Merge text and audio if input_features is not None and input_features_mask is not None: audio_features, audio_mask = self.get_audio_features(input_features, ~input_features_mask) # The Gemma3nProcessor expects all audio will be 30s in length and inserts 188 audio soft tokens into the # text to account for this. However, the audio preprocessing and encoder do not gurarantee they will # produce 188 soft tokens; they will produce at most that many tokens, but they may produce fewer tokens # depending on the length of the longest audio input in the batch. When we encounter this situation, we pad # the audio feature out to 188 soft tokens with the emebedding of the last token in the embed_audio vocab. 
audio_padding_toks = torch.tensor([[self.vocab_size - 1]], dtype=torch.long, device=audio_features.device) audio_padding_embs = self.embed_audio(input_ids=audio_padding_toks) audio_features = torch.where(audio_mask.unsqueeze(-1), audio_padding_embs, audio_features) audio_batch_size, audio_seq_len, audio_embed_dim = audio_features.shape extra_padding_tokens = self.config.audio_soft_tokens_per_image - audio_seq_len extra_padding_features = audio_padding_embs.expand(audio_batch_size, extra_padding_tokens, audio_embed_dim) audio_features = torch.cat((audio_features, extra_padding_features), dim=1) audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype) _, special_audio_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, audio_features=audio_features ) inputs_embeds = inputs_embeds.masked_scatter(special_audio_mask, audio_features) outputs = self.language_model( input_ids=None, per_layer_inputs=per_layer_inputs, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **lm_kwargs, ) return Gemma3nModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values if use_cache else None, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, audio_hidden_states=audio_features if input_features is not None else None, ) def get_audio_features( self, input_features: torch.Tensor, input_features_mask: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: """ Projects the last hidden state from the audio encoder into language model space. Args: input_features (`torch.FloatTensor]` of shape `(num_images, seq_length, num_features)`): The tensors corresponding to the input audio. input_features_mask (`torch.FloatTensor]` of shape `(num_images, seq_length)`): The attention mask for the input audio. Returns: audio_features (`torch.Tensor`): Audio feature tensor of shape `(num_images, audio_length, embed_dim)`). """ audio_outputs, audio_mask = self.audio_tower(input_features, input_features_mask) return self.embed_audio(inputs_embeds=audio_outputs), audio_mask def _update_causal_mask(self, **super_kwargs): raise AttributeError("We don't want to inherit it") @auto_docstring( custom_intro=""" The base Gemma 3n model comprising a vision backbone, an audio backbone, a language model, and a language modeling head. 
""" ) class Gemma3nForConditionalGeneration(PaliGemmaForConditionalGeneration): _checkpoint_conversion_mapping = {} base_model_prefix = "model" @property def audio_tower(self): return self.model.audio_tower @property def multi_modal_projector(self): raise AttributeError("Use embed_vision instead of multi_modal_projector.") @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, # text inputs pixel_values: Optional[torch.FloatTensor] = None, # vision inputs input_features: Optional[torch.FloatTensor] = None, # audio inputs attention_mask: Optional[torch.Tensor] = None, input_features_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, token_type_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **lm_kwargs, ) -> Gemma3nCausalLMOutputWithPast: r""" input_features_mask (torch.Tensor, *optional*, defaults to None): The attention mask for the input audio. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Gemma3ForConditionalGeneration >>> model = Gemma3ForConditionalGeneration.from_pretrained("google/gemma-3-4b-it") >>> processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it") >>> messages = [ ... { ... "role": "system", ... "content": [ ... {"type": "text", "text": "You are a helpful assistant."} ... ] ... }, ... { ... "role": "user", "content": [ ... {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"}, ... {"type": "text", "text": "Where is the cat standing?"}, ... ] ... }, ... ] >>> inputs = processor.apply_chat_template( ... messages, ... tokenizer=True, ... return_dict=True, ... return_tensors="pt", ... add_generation_prompt=True ... ) >>> # Generate >>> generate_ids = model.generate(**inputs) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "user\nYou are a helpful assistant.\n\n\n\n\n\nWhere is the cat standing?\nmodel\nBased on the image, the cat is standing in a snowy area, likely outdoors. 
It appears to" ``` """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, input_features=input_features, attention_mask=attention_mask, input_features_mask=input_features_mask, position_ids=position_ids, past_key_values=past_key_values, token_type_ids=token_type_ids, cache_position=cache_position, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, **lm_kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) if (final_logit_softcapping := self.config.get_text_config().final_logit_softcapping) is not None: logits = logits / final_logit_softcapping logits = torch.tanh(logits) logits = logits * final_logit_softcapping loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] if attention_mask is not None: # we use the input attention mask to shift the logits and labels, because it is 2D. # we also crop attn mask in case it is longer, which happens in PrefixTuning with peft shift_attention_mask = attention_mask[:, -shift_logits.shape[1] :].to(logits.device) shift_logits = shift_logits[shift_attention_mask.to(logits.device) != 0].contiguous() shift_labels = shift_labels[shift_attention_mask.to(shift_labels.device) != 0].contiguous() else: shift_logits = shift_logits.contiguous() shift_labels = shift_labels.contiguous() # Flatten the tokens loss_fct = nn.CrossEntropyLoss() flat_logits = shift_logits.view(-1, self.config.text_config.vocab_size) flat_labels = shift_labels.view(-1).to(shift_logits.device) loss = loss_fct(flat_logits, flat_labels) return Gemma3nCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, audio_hidden_states=outputs.audio_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, input_features=None, attention_mask=None, input_features_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs, ): # Overwritten -- custom `position_ids` and `pixel_values` handling model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, cache_position=cache_position, use_cache=use_cache, logits_to_keep=logits_to_keep, token_type_ids=token_type_ids, **kwargs, ) # If we're in cached decoding stage, multimodal inputs should be None because input ids do not contain special # tokens anymore. Otherwise multimodal inputs should be passed to model. 
# NOTE: use_cache=False always needs pixel_values, input_features, and input_features_mask if cache_position[0] == 0: model_inputs["pixel_values"] = pixel_values model_inputs["input_features"] = input_features model_inputs["input_features_mask"] = input_features_mask return model_inputs def _prepare_4d_causal_attention_mask_with_cache_position(self, **super_kwargs): raise AttributeError("Do not inherit _prepare_4d_causal_attention_mask_with_cache_position from PaliGemma") __all__ = [ "Gemma3nAudioConfig", "Gemma3nAudioEncoder", "Gemma3nConfig", "Gemma3nForCausalLM", "Gemma3nForConditionalGeneration", "Gemma3nModel", "Gemma3nPreTrainedModel", # noqa: F822 "Gemma3nTextConfig", "Gemma3nTextModel", "Gemma3nVisionConfig", ]
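# ---------------------------------------------------------------------------
# Illustrative aside (not part of the library source): the AltUp `correct`
# step above nudges every alternate prediction toward the activated stream by
# a learned per-token scalar. A minimal numerical sketch with toy shapes;
# `coefs` is a stand-in for the router + `correction_coefs` pair.
import torch

num_inputs, active_idx = 4, 0
batch, tokens, hidden = 2, 5, 8

predictions = torch.randn(num_inputs, batch, tokens, hidden)
activated = torch.randn(batch, tokens, hidden)
coefs = torch.randn(batch, tokens, num_inputs)  # one scalar per (token, altup input)

# Innovation: how far the activated stream drifted from the active prediction,
# broadcast to every altup input via a leading repeat.
innovation = (activated - predictions[active_idx]).repeat(num_inputs, 1, 1, 1)

# Mirror the permute/unsqueeze above: (batch, tokens, num_inputs) ->
# (num_inputs, batch, tokens, 1) so each scalar scales a whole hidden vector.
all_coefs = (coefs + 1.0).permute(2, 0, 1).unsqueeze(-1)

corrected = innovation * all_coefs + predictions
assert corrected.shape == predictions.shape
# ---------------------------------------------------------------------------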
# coding=utf-8 # Copyright 2025 The GLM4 & ZhipuAI team and HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch from ...cache_utils import Cache from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import CausalLMOutputWithPast from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.deprecation import deprecate_kwarg from ..glm.modeling_glm import GlmAttention, GlmForCausalLM, GlmForSequenceClassification, GlmForTokenClassification from ..phi3.modeling_phi3 import Phi3MLP from .configuration_glm4 import Glm4Config from .modeling_glm4 import Glm4RMSNorm logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "THUDM/GLM-4-9B-0414" class Glm4MLP(Phi3MLP): pass class Glm4DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: Glm4Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Glm4Attention(config=config, layer_idx=layer_idx) self.mlp = Glm4MLP(config) self.input_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_self_attn_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_mlp_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.post_self_attn_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.post_mlp_layernorm(hidden_states) hidden_states = residual + hidden_states return hidden_states class Glm4Attention(GlmAttention): pass class Glm4ForCausalLM(GlmForCausalLM): def forward( self, **super_kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing 
the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, Glm4ForCausalLM >>> model = Glm4ForCausalLM.from_pretrained("THUDM/GLM-4-9B-0414") >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/GLM-4-9B-0414") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" return super().forward(**super_kwargs) class Glm4ForSequenceClassification(GlmForSequenceClassification): pass class Glm4ForTokenClassification(GlmForTokenClassification): pass __all__ = [ "Glm4PreTrainedModel", # noqa: F822 "Glm4Model", # noqa: F822 "Glm4ForCausalLM", "Glm4ForSequenceClassification", "Glm4ForTokenClassification", ]
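# ---------------------------------------------------------------------------
# Illustrative aside (not the library source): Glm4DecoderLayer above wraps
# both the attention and MLP sub-blocks in a "sandwich" of norms - one before
# the sub-block and one on its output before the residual addition. A minimal
# sketch, assuming torch.nn.RMSNorm (available in PyTorch >= 2.4).
import torch
import torch.nn as nn


class SandwichResidual(nn.Module):
    def __init__(self, hidden_size: int, sub_block: nn.Module, eps: float = 1e-6):
        super().__init__()
        self.pre_norm = nn.RMSNorm(hidden_size, eps=eps)   # e.g. input_layernorm
        self.post_norm = nn.RMSNorm(hidden_size, eps=eps)  # e.g. post_self_attn_layernorm
        self.sub_block = sub_block

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.sub_block(self.pre_norm(hidden_states))
        return residual + self.post_norm(hidden_states)


# Chaining two of these reproduces the attention -> MLP layout of the layer.
layer = nn.Sequential(
    SandwichResidual(16, nn.Linear(16, 16)),  # attention stand-in
    SandwichResidual(16, nn.Linear(16, 16)),  # MLP stand-in
)
print(layer(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16])
# ---------------------------------------------------------------------------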
Here is how to convert a GPT2 model generated outside of `transformers`:

* [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)-generated model: Use [convert_megatron_gpt2_checkpoint.py](../megatron_gpt2/convert_megatron_gpt2_checkpoint.py)
* [big-science fork of Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed/)-generated model: Use the instructions [here](https://github.com/bigscience-workshop/bigscience/tree/aa872e754106f6678e8a9dac8c6962404ba39a6d/train/tr1-13B-base#checkpoint-conversion-and-upload). This approach uses a set of scripts that require this particular fork of Megatron-DeepSpeed.
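Whichever route you use, the conversion produces a standard `transformers` checkpoint directory. As a quick sanity check, the result should load and generate like any stock GPT2 checkpoint; a minimal sketch (the path is a placeholder, and the tokenizer line assumes tokenizer files were saved alongside the weights):

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = GPT2LMHeadModel.from_pretrained("path/to/converted/checkpoint")
tokenizer = GPT2Tokenizer.from_pretrained("path/to/converted/checkpoint")

inputs = tokenizer("The converted checkpoint", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```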
# coding=utf-8 # Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Optional import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_gpt_neo import GPTNeoConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "GPTNeoConfig" _CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" GPT_NEO_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ GPT_NEO_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length`. 
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxGPTNeoSelfAttention(nn.Module): config: GPTNeoConfig attention_type: str dtype: jnp.dtype = jnp.float32 def setup(self): config = self.config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and " f"`num_heads`: {self.num_heads})." ) self.attn_dropout = nn.Dropout(config.attention_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) dense = partial( nn.Dense, self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(use_bias=False), dense(use_bias=False), dense(use_bias=False) self.out_proj = dense() self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") if self.attention_type == "local": self.causal_mask = self.causal_mask ^ jnp.tril(self.causal_mask, -config.window_size) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slightly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. 
is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): query = self.q_proj(hidden_states) * jnp.sqrt(self.head_dim).astype(self.dtype) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query) key = self._split_heads(key) value = self._split_heads(value) query_length, key_length = query.shape[1], key.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] batch_size = hidden_states.shape[0] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) dropout_rng = None if not deterministic and self.config.attention_dropout > 0.0: dropout_rng = self.make_rng("dropout") # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.has_variable("cache", "cached_key") or init_cache: key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) # transform boolean mask into float mask attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) # usual dot product attention attn_weights = dot_product_attention_weights( query, key, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_dropout, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output, deterministic=deterministic) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxGPTNeoAttention(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): attention_type = self.config.attention_layers[self.layer_id] self.attention = FlaxGPTNeoSelfAttention(self.config, attention_type, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): return self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) class FlaxGPTNeoMLP(nn.Module): config: GPTNeoConfig intermediate_size: int dtype: jnp.dtype = jnp.float32 def setup(self): embed_dim = self.config.hidden_size kernel_init = jax.nn.initializers.normal(self.config.initializer_range) self.c_fc = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) self.c_proj = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) self.act = ACT2FN[self.config.activation_function] self.dropout = nn.Dropout(rate=self.config.resid_dropout) def __call__(self, hidden_states, deterministic: bool = True): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxGPTNeoBlock(nn.Module): config: GPTNeoConfig layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): hidden_size = self.config.hidden_size inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.attn = FlaxGPTNeoAttention(self.config, layer_id=self.layer_id, dtype=self.dtype) self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) self.mlp = FlaxGPTNeoMLP(self.config, inner_dim, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, ): residual = hidden_states hidden_states = self.ln_1(hidden_states) outputs = self.attn( hidden_states, attention_mask=attention_mask, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, ) # residual connection attn_output = outputs[0] hidden_states = attn_output + residual residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) # residual connection hidden_states = residual + 
feed_forward_hidden_states return (hidden_states,) + outputs[1:] class FlaxGPTNeoPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPTNeoConfig base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: GPTNeoConfig, input_shape: tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. """ # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length)) attention_mask = jnp.ones_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) def __call__( self, input_ids, attention_mask=None, position_ids=None, params: Optional[dict] = None, past_key_values: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_ids.shape if position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
# It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTNeoAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        outputs = self.module.apply(
            inputs,
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
            not train,
            False,
            output_attentions,
            output_hidden_states,
            return_dict,
            rngs=rngs,
            mutable=mutable,
        )

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past_key_values = outputs
            outputs["past_key_values"] = unfreeze(past_key_values["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past_key_values = outputs
            outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        return outputs


class FlaxGPTNeoBlockCollection(nn.Module):
    config: GPTNeoConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.blocks = [
            FlaxGPTNeoBlock(self.config, layer_id=i, name=str(i), dtype=self.dtype)
            for i in range(self.config.num_hidden_layers)
        ]

    def __call__(
        self,
        hidden_states,
        attention_mask=None,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for block in self.blocks:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = block(
                hidden_states,
                attention_mask,
                deterministic=deterministic,
                init_cache=init_cache,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

        # this contains possible `None` values - `FlaxGPTNeoModule` will filter them out
        outputs = (hidden_states, all_hidden_states, all_attentions)

        return outputs


class FlaxGPTNeoModule(nn.Module):
    config: GPTNeoConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.embed_dim = self.config.hidden_size
        embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range)
        self.wte = nn.Embed(
            self.config.vocab_size,
            self.embed_dim,
            embedding_init=embedding_init,
        )
        self.wpe = nn.Embed(
            self.config.max_position_embeddings,
            self.embed_dim,
            embedding_init=embedding_init,
        )
        self.dropout = nn.Dropout(rate=self.config.embed_dropout)
        self.h = FlaxGPTNeoBlockCollection(self.config, dtype=self.dtype)
        self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        deterministic=True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        input_embeds = self.wte(input_ids.astype("i4"))
        position_embeds = self.wpe(position_ids.astype("i4"))

        hidden_states = input_embeds + position_embeds
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)

        outputs = self.h(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.ln_f(hidden_states)

        if output_hidden_states:
            all_hidden_states = outputs[1] + (hidden_states,)
            outputs = (hidden_states, all_hidden_states) + outputs[2:]
        else:
            outputs = (hidden_states,) + outputs[1:]

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return
FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=outputs[1], attentions=outputs[-1], ) @add_start_docstrings( "The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top.", GPT_NEO_START_DOCSTRING, ) class FlaxGPTNeoModel(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoModule append_call_sample_docstring(FlaxGPTNeoModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxGPTNeoForCausalLMModule(nn.Module): config: GPTNeoConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.transformer = FlaxGPTNeoModule(self.config, dtype=self.dtype) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) def __call__( self, input_ids, attention_mask, position_ids, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): outputs = self.transformer( input_ids, attention_mask, position_ids, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: return (lm_logits,) + outputs[1:] return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) @add_start_docstrings( """ The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, GPT_NEO_START_DOCSTRING, ) class FlaxGPTNeoForCausalLM(FlaxGPTNeoPreTrainedModel): module_class = FlaxGPTNeoForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since GPTNeo uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) __all__ = ["FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel"]
transformers/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py/0
{ "file_path": "transformers/src/transformers/models/gpt_neo/modeling_flax_gpt_neo.py", "repo_id": "transformers", "token_count": 12155 }
477
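A minimal sketch of how the cache plumbing above is typically driven (not part of the file; the checkpoint name and prompt are illustrative):

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxGPTNeoForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

inputs = tokenizer("Hello, my name is", return_tensors="np")
input_ids = jnp.array(inputs["input_ids"], dtype="i4")
attention_mask = jnp.array(inputs["attention_mask"], dtype="i4")

# prepare_inputs_for_generation calls init_cache() and builds the static
# attention mask and position_ids that the cached decoding path expects.
model_inputs = model.prepare_inputs_for_generation(input_ids, max_length=20, attention_mask=attention_mask)

outputs = model(input_ids, **model_inputs)
next_token_logits = outputs.logits[:, -1, :]  # logits for the next position
updated_cache = outputs.past_key_values       # feed back in on the next step
```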
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Processor class for Granite Speech.""" from typing import Union from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...tokenization_utils import PreTokenizedInput, TextInput from ...utils import is_torch_available, logging from ...utils.import_utils import requires_backends if is_torch_available(): import torch logger = logging.get_logger(__name__) class GraniteSpeechProcessor(ProcessorMixin): attributes = ["audio_processor", "tokenizer"] audio_processor_class = "GraniteSpeechFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__( self, audio_processor, tokenizer, audio_token="<|audio|>", chat_template=None, ): self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token super().__init__(audio_processor, tokenizer, chat_template=chat_template) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], audio: Union["torch.Tensor", list["torch.Tensor"]] = None, device: str = "cpu", images=None, videos=None, **kwargs, ) -> BatchFeature: requires_backends(self, ["torch"]) text = self._get_validated_text(text) prompt_strings = text if audio is not None: # NOTE - we intentionally avoid throwing for potentially misaligned # text / audio inputs here because some inference engines will # trigger the conditions due to the way they call multimodal # processors, e.g., vLLM. audio_inputs = self.audio_processor(audio, device=device) # TODO (@alex-jw-brooks); we should add a util to get_num_audio_tokens # from feature lengths and call it here, rather than returning it # from the feature extractor. audio_embed_sizes = audio_inputs.pop("audio_embed_sizes") # Expand the audio placeholders to match the feature dims; this # is similar to how many VLMs handle image tokens, e.g., llava next prompt_strings = [] num_replaced = 0 for sample in text: while self.audio_token in sample: sample = sample.replace( self.audio_token, "<placeholder>" * audio_embed_sizes[num_replaced], 1, ) num_replaced += 1 prompt_strings.append(sample) prompt_strings = [sample.replace("<placeholder>", self.audio_token) for sample in prompt_strings] else: audio_inputs = {} if "padding" not in kwargs: kwargs["padding"] = True text_inputs = self.tokenizer(prompt_strings, **kwargs) return BatchFeature(data={**text_inputs, **audio_inputs}) def _get_validated_text(self, text: Union[str, list]) -> list[str]: if isinstance(text, str): return [text] elif isinstance(text, list) and isinstance(text[0], str): return text raise TypeError("Invalid text provided! Text should be a string or list of strings.") __all__ = ["GraniteSpeechProcessor"]
transformers/src/transformers/models/granite_speech/processing_granite_speech.py/0
{ "file_path": "transformers/src/transformers/models/granite_speech/processing_granite_speech.py", "repo_id": "transformers", "token_count": 1564 }
478
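A minimal sketch of the placeholder expansion this processor performs (not part of the file; the checkpoint name and the synthetic 16 kHz waveform are assumptions):

```python
import torch
from transformers import AutoProcessor

# Assumed checkpoint name, for illustration only.
processor = AutoProcessor.from_pretrained("ibm-granite/granite-speech-3.3-2b")

waveform = torch.randn(1, 16000)  # one second of synthetic 16 kHz mono audio
text = "<|audio|>can you transcribe this audio?"

# The single <|audio|> placeholder in `text` is expanded to one copy per
# projected audio frame before tokenization, keeping input_ids aligned with
# the extracted audio features.
batch = processor(text=text, audio=waveform, return_tensors="pt")
print({k: getattr(v, "shape", v) for k, v in batch.items()})
```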
from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_hunyuan_v1_dense import * from .modeling_hunyuan_v1_dense import * from .tokenization_hy import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
transformers/src/transformers/models/hunyuan_v1_dense/__init__.py/0
{ "file_path": "transformers/src/transformers/models/hunyuan_v1_dense/__init__.py", "repo_id": "transformers", "token_count": 159 }
479
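The `_LazyModule` indirection above means the configuration/modeling/tokenization submodules are only executed on first attribute access. A small sketch of the observable behavior (illustrative, not part of the file):

```python
import transformers.models.hunyuan_v1_dense as hunyuan_v1_dense

# The import above is cheap: the object registered in sys.modules is a
# _LazyModule proxy, and the configuration_/modeling_/tokenization_ files
# only run when one of their attributes is first looked up.
print(type(hunyuan_v1_dense).__name__)  # _LazyModule
print(sorted(dir(hunyuan_v1_dense)))    # exported names, still lazily bound
```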
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 Idefics model.""" from __future__ import annotations from dataclasses import dataclass import tensorflow as tf from ... import TFPreTrainedModel from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ModelOutput from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFModelInputType, keras_serializable, shape_list, unpack_inputs, ) from ...tf_utils import invert_attention_mask, scaled_dot_product_attention from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_idefics import IdeficsConfig from .perceiver_tf import TFIdeficsPerceiverResampler from .vision_tf import TFIdeficsVisionTransformer logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "IdeficsConfig" @dataclass class TFIdeficsBaseModelOutputWithPast(ModelOutput): """ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. 
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(tf.Tensor)`, *optional*): Tuple of `tf.Tensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ last_hidden_state: tf.Tensor | None = None past_key_values: tuple[tuple[tf.Tensor]] | None = None hidden_states: tuple[tf.Tensor] | None = None attentions: tuple[tf.Tensor] | None = None image_hidden_states: tuple[tf.Tensor] | None = None @dataclass class TFIdeficsCausalLMOutputWithPast(ModelOutput): """ Base class for Idefics causal language model (or autoregressive) outputs. Args: loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. image_hidden_states (`tuple(tf.Tensor)`, *optional*): Tuple of `tf.Tensor` (one for the output of the image embeddings, `(batch_size, num_images, sequence_length, hidden_size)`. 
image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver """ loss: tf.Tensor | None = None logits: tf.Tensor | None = None past_key_values: list[tf.Tensor] | None = None hidden_states: tuple[tf.Tensor] | None = None attentions: tuple[tf.Tensor] | None = None image_hidden_states: tuple[tf.Tensor] | None = None def expand_inputs_for_generation( input_ids, expand_size=1, is_encoder_decoder=False, attention_mask=None, encoder_outputs=None, **model_kwargs, ): expanded_return_idx = tf.reshape(tf.repeat(tf.range(tf.shape(input_ids)[0]), expand_size), [-1]) input_ids = tf.gather(input_ids, expanded_return_idx) model_kwargs["pixel_values"] = model_kwargs.get("pixel_values") model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings") model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings") model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask") if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = tf.gather(token_type_ids, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = tf.gather(attention_mask, expanded_return_idx) if model_kwargs["image_attention_mask"] is not None: model_kwargs["image_attention_mask"] = tf.gather(model_kwargs["image_attention_mask"], expanded_return_idx) if model_kwargs["pixel_values"] is not None: model_kwargs["pixel_values"] = tf.gather(model_kwargs["pixel_values"], expanded_return_idx) elif model_kwargs["image_encoder_embeddings"] is not None: model_kwargs["image_encoder_embeddings"] = tf.gather( model_kwargs["image_encoder_embeddings"], expanded_return_idx ) elif model_kwargs["perceiver_embeddings"] is not None: model_kwargs["perceiver_embeddings"] = tf.gather(model_kwargs["perceiver_embeddings"], expanded_return_idx) return input_ids, model_kwargs def update_model_kwargs_for_generation(outputs, model_kwargs): # must have this key set to at least None if "past_key_values" in outputs: model_kwargs["past_key_values"] = outputs.past_key_values else: model_kwargs["past_key_values"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = tf.concat([token_type_ids, token_type_ids[:, -1:, ...]], axis=-1) # update attention masks if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = tf.concat( [attention_mask, tf.ones_like(attention_mask[:, -1:, ...])], axis=-1 ) if "image_attention_mask" in model_kwargs: image_attention_mask = model_kwargs["image_attention_mask"] last_mask = image_attention_mask[:, -1:, ...] 
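    # Only the last time step of the mask is needed from here on: during
    # incremental decoding a single new text token is fed per call, so the
    # slice above is what gets carried forward for that step.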
model_kwargs["image_attention_mask"] = last_mask # Get the precomputed image_hidden_states model_kwargs["image_hidden_states"] = outputs.image_hidden_states return model_kwargs def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs): token_type_ids = kwargs.get("token_type_ids") # only last token for inputs_ids if past is defined in kwargs if past_key_values is not None: input_ids = input_ids[:, -1:] if token_type_ids is not None: token_type_ids = token_type_ids[:, -1:] attention_mask = kwargs.get("attention_mask") position_ids = kwargs.get("position_ids") if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int64), axis=-1) - 1 position_ids = tf.where(attention_mask == 0, 1, position_ids) if past_key_values is not None: position_ids = position_ids[:, -1:] pixel_values = kwargs.get("pixel_values") image_encoder_embeddings = kwargs.get("image_encoder_embeddings") perceiver_embeddings = kwargs.get("perceiver_embeddings") image_attention_mask = kwargs.get("image_attention_mask") interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False) return { "input_ids": input_ids, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "pixel_values": pixel_values, "image_encoder_embeddings": image_encoder_embeddings, "perceiver_embeddings": perceiver_embeddings, "image_attention_mask": image_attention_mask, "interpolate_pos_encoding": interpolate_pos_encoding, } def freeze_model(model, module_exceptions=[]): mapping = { "LayerNorm": tf.keras.layers.LayerNormalization, "Dense": tf.keras.layers.Dense, "Embedding": tf.keras.layers.Embedding, } module_exceptions_mapped = [mapping[m] for m in module_exceptions] if not hasattr(model, "layers"): model.trainable = False # It is just a layer return model for layer in model.layers: if module_exceptions and any(isinstance(layer, t) for t in module_exceptions_mapped): layer.trainable = True # Explicitly setting it to true to avoid any mistakes else: layer.trainable = False return model class TFIdeficsDecoupledEmbedding(tf.keras.layers.Embedding): """ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0, then it will create `num_additional_embeddings` additional parameters that are always trained. If `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `tf.keras.layers.Embedding`. """ def __init__( self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: bool | None = False, dtype=None, **kwargs, ) -> None: """ Args: num_embeddings (`int`): Size of the dictionary of embeddings num_additional_embeddings (`int`): Number of additional embeddings. Only useful when you `partially_freeze=True`. embedding_dim (`int`): The size of each embedding vector partially_freeze: (`bool`, *optional*, defaults to `False`): If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen. Note: there are a lot of other parameters to initialize a standard `tf.keras.layers.Embedding` such as `mask_zero`, `input_length` or `embeddings_initializer`. We are not supporting these. 
""" super().__init__( input_dim=num_embeddings, output_dim=embedding_dim, dtype=dtype, **kwargs, ) self.num_embeddings = num_embeddings self.num_additional_embeddings = num_additional_embeddings self.partially_freeze = partially_freeze if partially_freeze: self.trainable = False if self.num_additional_embeddings > 0: self.additional_embedding = tf.keras.layers.Embedding( input_dim=self.num_additional_embeddings, output_dim=embedding_dim, dtype=dtype, name="additional_embedding", ) def call(self, input_ids): """ we have 2 embeddings, with different indices - one pretrained self.weight and another self.additional_embedding.weight that is being trained. in order to make a lookup of the input ids, we: 1. find out the indices of the entries belonging to the 2nd embedding 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd embedding starts from 0 and not num_embeddings 3. perform the 2nd embedding lookup 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index 5. perform the 1st embedding lookup 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup note: for the 1st embedding lookup we could have looked up only the low indices and not do the padding, but then we have to create a new tensor and populate it with 2 tensors that are spread out across various indices - i.e. not a simple concat - I haven't benchmarked the complex case if it's any faster, given that seqlens are usually relatively short it's probably not faster or if faster not by much - but might be a good idea to measure. """ if self.num_additional_embeddings == 0: return super().call(input_ids) # Clone so that we don't modify the original input_ids later on input_ids = tf.identity(input_ids) additional_vocab_indices = tf.where(input_ids >= self.num_embeddings) input_ids_additional_vocab = tf.gather_nd(input_ids, additional_vocab_indices) additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings) # for successful lookup replace input_ids with 0, the results of these will be discarded anyway input_ids = tf.tensor_scatter_nd_update( input_ids, additional_vocab_indices, # tensor filled with 0, having the same length as additional_vocab_indices tf.zeros(tf.shape(additional_vocab_indices)[0], dtype=input_ids.dtype), ) full_vector = super().call(input_ids) # overwrite the records with high indices full_vector = tf.tensor_scatter_nd_update(full_vector, additional_vocab_indices, additional_embeddings) return full_vector def extra_repr(self) -> str: return f"num_embeddings={self.num_embeddings}, num_additional_embeddings={self.num_additional_embeddings}, embedding_dim={self.output_dim}, partially_freeze={self.partially_freeze}" class TFIdeficsDecoupledLinear(tf.keras.layers.Layer): """ Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practise, the regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `out_additional_features` > 0, then it will create `out_additional_features * in_features` additional parameters that are always trained. If `out_additional_features=0`, then the module defaults back to the regular behavior of `tf.keras.layers.Dense`. """ def __init__( self, in_features: int, out_features: int, out_additional_features: int = 0, bias: bool = True, partially_freeze: bool = True, **kwargs, ) -> None: """ out_additional_features: int. 
Number of additional trainable dimensions. Only makes sense when `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be trainable. If False, default to the regular behavior of tf.keras.layers.Dense. """ super().__init__(**kwargs) self.out_additional_features = out_additional_features self.partially_freeze = partially_freeze self.in_features = in_features self.out_features = out_features self.use_bias = bias if out_additional_features > 0: self.additional_fc = tf.keras.layers.Dense( units=out_additional_features, use_bias=bias, name="additional_fc" ) def call(self, inputs: tf.Tensor) -> tf.Tensor: output = tf.linalg.matmul(a=inputs, b=self.weight, transpose_b=True) if self.bias is not None: output = tf.nn.bias_add(output, self.bias) if self.out_additional_features > 0: additional_features = self.additional_fc(inputs) output = tf.concat([output, additional_features], axis=-1) return output def get_config(self): config = super().get_config() config.update( { "in_features": self.in_features, "out_features": self.out_features, "out_additional_features": self.out_additional_features, "bias": self.bias is not None, "partially_freeze": self.partially_freeze, } ) return config def extra_repr(self) -> str: """Overwriting `nn.Linear.extra_repr` to include new parameters.""" return f"in_features={self.in_features}, out_features={self.out_features}, out_additional_features={self.out_additional_features}, bias={self.bias is not None}, partially_freeze={self.partially_freeze}" @classmethod def from_config(cls, config): return cls(**config) def build(self, input_shape=None): if self.built: return self.built = True self.weight = self.add_weight( shape=(self.out_features, self.in_features), trainable=not self.partially_freeze, name="weight" ) if self.use_bias: self.bias = self.add_weight(shape=(self.out_features,), trainable=not self.partially_freeze, name="bias") else: self.bias = None if getattr(self, "additional_fc", None) is not None: with tf.name_scope(self.additional_fc.name): self.additional_fc.build(self.in_features) def _make_causal_mask(input_ids_shape, dtype, past_key_values_length=0): """ Make causal mask used for bi-directional self-attention, supporting both static and dynamic shapes. """ bsz, tgt_len = input_ids_shape # Create a matrix where only the lower triangle and diagonal are filled with zeros (causal mask) mask = tf.fill((tgt_len, tgt_len), tf.dtypes.as_dtype(dtype).min) mask_cond = tf.range(tgt_len) mask = tf.where(mask_cond[:, None] >= mask_cond[None, :], 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length), dtype=dtype), mask], axis=-1) if bsz is None: # When batch size is dynamic, expand and tile # so we can compile a functional model mask = tf.expand_dims(mask, 0) mask = tf.expand_dims(mask, 0) # shape: (1, 1, tgt_len, tgt_len + past_key_values_length) mask = tf.tile(mask, [bsz, 1, 1, 1]) else: # When batch size is static, directly use broadcast_to mask = tf.broadcast_to(mask[None, None, :, :], (bsz, 1, tgt_len, tgt_len + past_key_values_length)) return mask def _expand_mask(mask, dtype, tgt_len=None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = shape_list(mask) tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = tf.expand_dims(tf.expand_dims(mask, 1), 1) expanded_mask = tf.broadcast_to(expanded_mask, [bsz, 1, tgt_len, src_len]) inverted_mask = 1.0 - tf.cast(expanded_mask, dtype) return tf.where( tf.cast(inverted_mask, bool), tf.fill(dims=shape_list(inverted_mask), value=tf.float32.min), inverted_mask ) class TFIdeficsRMSNorm(tf.keras.layers.Layer): def __init__(self, hidden_size, eps=1e-6, **kwargs): """ TFIdeficsRMSNorm is equivalent to T5LayerNorm """ super().__init__(**kwargs) self.hidden_size = hidden_size self.variance_epsilon = eps def build(self, input_shape): if self.built: return self.built = True self.weight = self.add_weight(name="weight", shape=[self.hidden_size], initializer="ones") super().build(input_shape) def call(self, hidden_states): variance = tf.math.reduce_mean(tf.math.square(tf.cast(hidden_states, tf.float32)), axis=-1, keepdims=True) hidden_states = hidden_states * tf.math.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [tf.float16, tf.bfloat16]: hidden_states = tf.cast(hidden_states, self.weight.dtype) return self.weight * hidden_states class TFIdeficsEmbedding(tf.keras.layers.Layer): def __init__(self, dim, max_position_embeddings=2048, base=10000, **kwargs): super().__init__(**kwargs) self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base self.inv_freq = tf.constant( 1.0 / (self.base ** (tf.range(start=0, limit=self.dim, delta=2, dtype=tf.float32) / self.dim)) ) def _compute_cos_sin(self, seq_len): t = tf.range(seq_len, dtype=self.inv_freq.dtype) freqs = tf.einsum("i, j -> ij", t, self.inv_freq) # Outer multiplication emb = tf.concat((freqs, freqs), axis=-1) return tf.cos(emb), tf.sin(emb) def call(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len is None: seq_len = shape_list(x)[2] return self._compute_cos_sin(seq_len=seq_len) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return tf.concat((-x2, x1), axis=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): cos = tf.gather(cos, position_ids) # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim] sin = tf.gather(sin, position_ids) cos = tf.expand_dims(cos, 1) sin = tf.expand_dims(sin, 1) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class TFIdeficsMLP(tf.keras.layers.Layer): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, **kwargs, ): super().__init__(**kwargs) self.gate_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name="gate_proj") self.down_proj = tf.keras.layers.Dense(hidden_size, use_bias=False, name="down_proj") self.up_proj = tf.keras.layers.Dense(intermediate_size, use_bias=False, name="up_proj") self.act_fn = get_tf_activation(hidden_act) self.intermediate_size = intermediate_size self.hidden_size = hidden_size def call(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "gate_proj", None) is not None: with tf.name_scope(self.gate_proj.name): self.gate_proj.build(self.hidden_size) if getattr(self, "down_proj", None) is not None: with tf.name_scope(self.down_proj.name): self.down_proj.build(self.intermediate_size) if getattr(self, "up_proj", None) is not 
None:
            with tf.name_scope(self.up_proj.name):
                self.up_proj.build(self.hidden_size)


class TFIdeficsAttention(tf.keras.layers.Layer):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        dropout: float = 0.0,
        is_cross_attention: bool = False,
        config: IdeficsConfig = None,
        qk_layer_norms: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.head_dim = hidden_size // num_heads
        self.dropout = dropout
        self.config = config
        self.is_causal = True

        if (self.head_dim * num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {num_heads})."
            )

        self.is_cross_attention = is_cross_attention

        self.q_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="q_proj",
        )
        self.k_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="k_proj",
        )
        self.v_proj = tf.keras.layers.Dense(
            num_heads * self.head_dim,
            use_bias=False,
            name="v_proj",
        )
        self.o_proj = tf.keras.layers.Dense(
            hidden_size,
            use_bias=False,
            name="o_proj",
        )
        self.rotary_emb = TFIdeficsEmbedding(self.head_dim, name="rotary_emb")

        self.qk_layer_norms = qk_layer_norms
        if self.qk_layer_norms:
            self.q_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name="q_layer_norm")
            self.k_layer_norm = TFIdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps, name="k_layer_norm")

    def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
        return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), perm=[0, 2, 1, 3])

    def call(
        self,
        hidden_states: tf.Tensor,
        key_value_states: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        past_key_value: tuple[tf.Tensor] | None = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> tuple[tf.Tensor, tf.Tensor | None, tuple[tf.Tensor] | None]:
        # if key_value_states are provided this layer is used as a cross-attention layer
        is_cross_attention = self.is_cross_attention or key_value_states is not None

        bsz, q_len, _ = shape_list(hidden_states)

        query_states = self._shape(self.q_proj(hidden_states), q_len, bsz)
        if not is_cross_attention:
            key_states = self._shape(self.k_proj(hidden_states), q_len, bsz)
            value_states = self._shape(self.v_proj(hidden_states), q_len, bsz)
        else:
            _, kv_len, _ = shape_list(key_value_states)  # Note that, in this case, `kv_len` == `kv_seq_len`
            key_states = self._shape(self.k_proj(key_value_states), kv_len, bsz)
            value_states = self._shape(self.v_proj(key_value_states), kv_len, bsz)

        kv_seq_len = shape_list(key_states)[-2]
        if past_key_value is not None:
            kv_seq_len += shape_list(past_key_value[0])[-2]
        if not is_cross_attention:
            # Below is to allow symbolic tensors compilation
            if tf.is_tensor(kv_seq_len):
                # elementwise maximum; tf.reduce_max would treat `q_len` as an axis
                seq_len = tf.maximum(kv_seq_len, q_len)
            else:
                seq_len = max(kv_seq_len, q_len)
            cos, sin = self.rotary_emb(value_states, seq_len)
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
        # [bsz, nh, t, hd]

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = tf.concat([past_key_value[0], key_states], axis=2)
            value_states = tf.concat([past_key_value[1], value_states], axis=2)

        past_key_value = (key_states, value_states) if use_cache else None

        if self.qk_layer_norms:
            query_states = self.q_layer_norm(query_states)
            key_states = self.k_layer_norm(key_states)
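        # Shape sanity check before SDPA: the 4D attention mask must be
        # (bsz, 1, q_len, kv_seq_len) so it broadcasts across all heads.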
tf.debugging.assert_equal( tf.shape(attention_mask), [bsz, 1, q_len, kv_seq_len], message=f"Attention weights should be of size {[bsz, 1, q_len, kv_seq_len]}, but is {tf.shape(attention_mask)}", ) attn_output = scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=attention_mask, # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal=self.is_causal and attention_mask is None and q_len > 1, ) tf.debugging.assert_equal( tf.shape(attn_output), [bsz, self.num_heads, q_len, self.head_dim], message=f"Attention weights should be of size {[bsz, self.num_heads, q_len, self.head_dim]}, but is {tf.shape(attn_output)}", ) attn_output = tf.reshape(tf.transpose(attn_output, perm=[0, 2, 1, 3]), (bsz, q_len, self.hidden_size)) attn_output = self.o_proj(attn_output) attn_weights = None if output_attentions: logger.warning_once( "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead" ) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if self.is_cross_attention: kv_input_dim = ( self.hidden_size if not hasattr(self.config.vision_config, "embed_dim") else self.config.vision_config.embed_dim ) else: kv_input_dim = self.hidden_size if getattr(self, "o_proj", None) is not None: with tf.name_scope(self.o_proj.name): self.o_proj.build(self.num_heads * self.head_dim) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build(self.hidden_size) if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build(kv_input_dim) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build(kv_input_dim) if getattr(self, "rotary_emb", None) is not None: with tf.name_scope(self.rotary_emb.name): self.rotary_emb.build(None) class TFIdeficsDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.self_attn = TFIdeficsAttention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, dropout=config.dropout, config=config, name="self_attn", ) self.mlp = TFIdeficsMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, name="mlp", ) self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="input_layernorm") self.post_attention_layernorm = TFIdeficsRMSNorm( config.hidden_size, eps=config.rms_norm_eps, name="post_attention_layernorm" ) self.dropout = config.dropout def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, past_key_value: tuple[tf.Tensor] | None = None, output_attentions: bool | None = False, use_cache: bool | None = False, training=False, ) -> tuple[tf.Tensor, tuple[tf.Tensor, tf.Tensor] | None]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
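# The decoder layer below follows the pre-norm LLaMA-style layout: RMSNorm ->
# self-attention -> residual add, then RMSNorm -> gated MLP -> residual add,
# with dropout applied to each branch output.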
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.dropout) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "mlp", None) is not None: with tf.name_scope(self.mlp.name): self.mlp.build(None) if getattr(self, "input_layernorm", None) is not None: with tf.name_scope(self.input_layernorm.name): self.input_layernorm.build(None) if getattr(self, "post_attention_layernorm", None) is not None: with tf.name_scope(self.post_attention_layernorm.name): self.post_attention_layernorm.build(None) class TFIdeficsGatedCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config: IdeficsConfig, **kwargs): super().__init__(**kwargs) self.hidden_size = config.hidden_size self.cross_attn = TFIdeficsAttention( hidden_size=self.hidden_size, num_heads=config.num_attention_heads, is_cross_attention=True, dropout=config.dropout, config=config, qk_layer_norms=config.qk_layer_norms, name="cross_attn", ) self.mlp = TFIdeficsMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, name="mlp", ) self.input_layernorm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="input_layernorm") self.post_attention_layernorm = TFIdeficsRMSNorm( config.hidden_size, eps=config.rms_norm_eps, name="post_attention_layernorm" ) self.config = config.dropout self.act_cross_attn = tf.keras.activations.tanh self.act_dense = tf.keras.activations.tanh self.alpha_initializer = config.alpha_initializer self.alpha_type = config.alpha_type self.alphas_initializer_range = config.alphas_initializer_range def build(self, input_shape): if self.built: return self.built = True if self.alpha_initializer == "zeros": if self.alpha_type == "vector": self.alpha_cross_attn = self.add_weight( shape=(1, 1, self.hidden_size), initializer="zeros", trainable=True, name="alpha_cross_attn" ) self.alpha_dense = self.add_weight( shape=(1, 1, self.hidden_size), initializer="zeros", trainable=True, name="alpha_dense" ) elif self.alpha_type == "float": self.alpha_cross_attn = self.add_weight( shape=(1,), initializer="zeros", trainable=True, name="alpha_cross_attn" ) self.alpha_dense = self.add_weight(shape=(1,), initializer="zeros", trainable=True, name="alpha_dense") else: raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})") elif self.alpha_initializer == "ones": if self.alpha_type == "vector": 
self.alpha_cross_attn = self.add_weight(
                    shape=(1, 1, self.hidden_size), initializer="ones", trainable=True, name="alpha_cross_attn"
                )
                self.alpha_dense = self.add_weight(
                    shape=(1, 1, self.hidden_size), initializer="ones", trainable=True, name="alpha_dense"
                )
            elif self.alpha_type == "float":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1,), initializer="ones", trainable=True, name="alpha_cross_attn"
                )
                self.alpha_dense = self.add_weight(shape=(1,), initializer="ones", trainable=True, name="alpha_dense")
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})")

        elif self.alpha_initializer in {"normal", "gaussian", "random"}:
            if self.alpha_type == "vector":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1, 1, self.hidden_size),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_cross_attn",
                )
                self.alpha_dense = self.add_weight(
                    shape=(1, 1, self.hidden_size),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_dense",
                )
            elif self.alpha_type == "float":
                self.alpha_cross_attn = self.add_weight(
                    shape=(1,),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_cross_attn",
                )
                self.alpha_dense = self.add_weight(
                    shape=(1,),
                    initializer=tf.keras.initializers.RandomNormal(mean=0.0, stddev=self.alphas_initializer_range),
                    trainable=True,
                    name="alpha_dense",
                )
            else:
                raise ValueError(f"Unknown value for `alpha_type` ({self.alpha_type})")

        else:
            raise NotImplementedError(f"Alpha initialization scheme {self.alpha_initializer} not yet implemented!")

        if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
            raise ValueError("Alpha parameters not initialized correctly!")

        with tf.name_scope(self.cross_attn.name):
            self.cross_attn.build(None)
        with tf.name_scope(self.mlp.name):
            self.mlp.build(None)
        with tf.name_scope(self.input_layernorm.name):
            self.input_layernorm.build(None)
        with tf.name_scope(self.post_attention_layernorm.name):
            self.post_attention_layernorm.build(None)
        super().build(input_shape)

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        image_hidden_states: tf.Tensor | None = None,
        image_attention_mask: tf.Tensor | None = None,
        cross_attention_gate: tf.Tensor | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = False,
        past_key_value: tuple[tf.Tensor] | None = None,
    ) -> tuple[tf.Tensor, tuple[tf.Tensor, tf.Tensor] | None]:
        """
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)`
                where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
        """
        if image_hidden_states is None:
            raise ValueError(
                "`image_hidden_states` is required for Idefics cross attention module which are visual features to be"
                " conditioned on."
) if cross_attention_gate is None: raise ValueError( "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images." ) if past_key_value is not None: raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.") residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.cross_attn( hidden_states=hidden_states, key_value_states=image_hidden_states, attention_mask=image_attention_mask, output_attentions=output_attentions, ) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) mask = tf.cast(cross_attention_gate == 0, dtype=hidden_states.dtype) # Expand dimensions of mask to match hidden_states mask = tf.expand_dims(mask, -1) hidden_states = tf.where( tf.broadcast_to(mask, tf.shape(hidden_states)) == 1, tf.zeros_like(hidden_states), hidden_states ) # when there are no images the model is used in pure language mode # gate = 0 if no_images else 1 hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = tf.nn.dropout(hidden_states, rate=self.config) hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a TensorFlow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) subclass. Use it as a regular TensorFlow Layer and refer to the TensorFlow documentation for all matter related to general usage and behavior. Parameters: config ([`IdeficsConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class TFIdeficsPreTrainedModel(TFPreTrainedModel): config_class = IdeficsConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["TFIdeficsDecoderLayer", "TFIdeficsGatedCrossAttentionLayer"] LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) @keras_serializable class TFIdeficsMainLayer(tf.keras.layers.Layer): """ Transformer decoder consisting of `config.num_hidden_layers` layers. 
Each layer is a [`TFIdeficsDecoderLayer`]

    Args:
        config: IdeficsConfig
    """

    config_class = IdeficsConfig

    def __init__(self, config: IdeficsConfig, add_pooling_layer: bool = True, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = TFIdeficsDecoupledEmbedding(
            num_embeddings=config.vocab_size,
            num_additional_embeddings=config.additional_vocab_size,
            embedding_dim=config.hidden_size,
            partially_freeze=config.freeze_text_layers,
            name="embed_tokens",
        )

        self.image_size = config.vision_config.image_size
        self.vision_config = config.vision_config
        self.vision_model = TFIdeficsVisionTransformer(config.vision_config, name="vision_model")

        # Perceiver Resampler
        if config.use_resampler:
            perceiver_config = config.perceiver_config
            self.perceiver_resampler = TFIdeficsPerceiverResampler(
                config,
                config.vision_config.embed_dim,
                perceiver_config.resampler_depth,
                perceiver_config.resampler_n_heads,
                perceiver_config.resampler_head_dim,
                perceiver_config.resampler_n_latents,
                name="perceiver_resampler",
            )

        self.decoder_layers = [
            TFIdeficsDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
        ]

        self.cross_layer_interval = config.cross_layer_interval
        num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
        self.gated_cross_attn_layers = [
            TFIdeficsGatedCrossAttentionLayer(config, name=f"gated_cross_attn_layers.{i}")
            for i in range(num_cross_layers)
        ]
        self.norm = TFIdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps, name="norm")

        self.gradient_checkpointing = False
        self.freeze_relevant_params(config)

    def freeze_relevant_params(self, config=None):
        if config is None:
            config = self.config

        if config.freeze_text_layers:
            self.freeze_text_layers(config.freeze_text_module_exceptions)

        if config.freeze_vision_layers:
            freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)

    def freeze_text_layers(self, module_exceptions=[]):
        for module in [self.decoder_layers, self.norm]:
            freeze_model(module, module_exceptions=module_exceptions)

    def freeze_vision_layers(self, module_exceptions=[]):
        freeze_model(self.vision_model, module_exceptions=module_exceptions)

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        # if input_shape[-1] > 1:
        combined_attention_mask = _make_causal_mask(
            input_shape,
            inputs_embeds.dtype,
            past_key_values_length=past_key_values_length,
        )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        past_key_values: list[tf.Tensor] | None = None,
        inputs_embeds: tf.Tensor | None = None,
        pixel_values: tf.Tensor | None = None,
        image_encoder_embeddings: tf.Tensor | None = None,
        perceiver_embeddings: tf.Tensor | None = None,
        image_attention_mask: tf.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = False, return_dict: bool | None = None, training: bool | None = None, ) -> TFIdeficsBaseModelOutputWithPast | tuple[tf.Tensor]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = shape_list(input_ids) elif inputs_embeds is not None: batch_size, seq_length, _ = shape_list(inputs_embeds) else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = shape_list(past_key_values[0][0])[2] seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = tf.math.cumsum(tf.cast(attention_mask, dtype=tf.int32), axis=-1) - 1 position_ids = tf.where(attention_mask == 0, 1, position_ids) elif position_ids is None: position_ids = tf.range(past_key_values_length, seq_length + past_key_values_length, dtype=tf.int32) position_ids = tf.expand_dims(position_ids, 0) no_images = False if ( sum((int(pixel_values is None), int(image_encoder_embeddings is None), int(perceiver_embeddings is None))) != 2 ): raise ValueError( "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None." 
) elif pixel_values is not None: no_images = tf.reduce_sum(tf.cast(pixel_values, dtype=tf.int32)) == 0 pixel_values = tf.cast(pixel_values, dtype=self.dtype) # fp16 compatibility # Below hack is because when cross-loading pytorch weights, there is an # initial forward pass with dummy input and code below is here to handle that if len(pixel_values.shape) == 4: batch_size = shape_list(pixel_values)[0] num_images = shape_list(pixel_values)[0] # pixel_values = tf.reshape(pixel_values, [batch_size * num_images, *pixel_values.shape[1:]]) elif len(pixel_values.shape) == 5: batch_size, num_images = shape_list(pixel_values)[:2] pixel_values = tf.reshape(pixel_values, [batch_size * num_images, *pixel_values.shape[2:]]) # Get sequence from the vision encoder image_hidden_states = self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding ).last_hidden_state elif image_encoder_embeddings is not None: batch_size, num_images, image_seq_len, image_hidden_size = shape_list(image_encoder_embeddings) image_hidden_states = tf.cast(image_encoder_embeddings, dtype=self.dtype) image_hidden_states = tf.reshape( image_hidden_states, (batch_size * num_images, image_seq_len, image_hidden_size) ) if self.config.use_resampler: if perceiver_embeddings is None: perceiver_embeddings = self.perceiver_resampler(image_hidden_states) image_seq_len, image_hidden_size = shape_list(perceiver_embeddings)[1:3] else: batch_size, num_images, image_seq_len, image_hidden_size = shape_list(perceiver_embeddings) image_hidden_states = perceiver_embeddings elif perceiver_embeddings is None: image_seq_len, image_hidden_size = shape_list(image_hidden_states)[1:3] else: raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True") image_hidden_states = tf.reshape( image_hidden_states, (batch_size, num_images * image_seq_len, image_hidden_size) ) # # Hack to use the model in full language modeling mode # image_attention_mask = tf.zeros((batch_size, seq_length, 1), dtype=tf.int32) # this is to account for the dummy inputs if pixel_values is not None and len(pixel_values.shape) == 4 and image_attention_mask is None: image_attention_mask = tf.zeros((batch_size, seq_length, 1), dtype=tf.int32) text_seq_len = shape_list(image_attention_mask)[1] image_attention_mask = tf.expand_dims(image_attention_mask, -1) image_attention_mask = tf.repeat(image_attention_mask, repeats=image_seq_len) image_attention_mask = tf.reshape(image_attention_mask, (batch_size, text_seq_len, num_images * image_seq_len)) if image_hidden_states is not None: image_batch_size, image_sequence_length, _ = shape_list(image_hidden_states) image_hidden_shape = (image_batch_size, image_sequence_length) if image_attention_mask is None: image_attention_mask = tf.ones(image_hidden_shape, dtype=tf.int32) image_attention_mask = invert_attention_mask(image_attention_mask) else: image_attention_mask = None cross_attention_gate = tf.squeeze( tf.cast(tf.reduce_any(image_attention_mask == 0, axis=-1), dtype=self.dtype), axis=1 ) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: attention_mask = tf.ones((batch_size, seq_length_with_past), dtype=tf.bool) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None def vblock( main_block, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, cross_attention_gate, output_attentions, use_cache, layer_idx, cross_layer_interval, gated_cross_attn_layers, ): # TODO(ls): Add cross attention values to respective lists if layer_idx % cross_layer_interval == 0: xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval] outputs = xblock( hidden_states, attention_mask=attention_mask, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, past_key_value=None, # not implemented ) hidden_states = outputs[0] layer_outputs = main_block( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) return layer_outputs if self.gradient_checkpointing and training: past_key_value = None if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False layer_outputs = tf.recompute_grad( vblock, decoder_layer, hidden_states, attention_mask, position_ids, past_key_value, image_hidden_states, image_attention_mask, output_attentions, use_cache, no_images, idx, self.cross_layer_interval, self.gated_cross_attn_layers, ) else: layer_outputs = vblock( decoder_layer, hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, image_hidden_states=image_hidden_states, image_attention_mask=image_attention_mask, cross_attention_gate=cross_attention_gate, output_attentions=output_attentions, use_cache=use_cache, layer_idx=idx, cross_layer_interval=self.cross_layer_interval, gated_cross_attn_layers=self.gated_cross_attn_layers, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None image_hidden_states = tf.reshape( image_hidden_states, (batch_size, num_images, image_seq_len, image_hidden_size) ) if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states] if v is not None ) return TFIdeficsBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, image_hidden_states=image_hidden_states, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_tokens", None) is not None: with tf.name_scope(self.embed_tokens.name): self.embed_tokens.build(None) if getattr(self, "vision_model", None) is not None: with tf.name_scope(self.vision_model.name): self.vision_model.build(None) if getattr(self, "norm", None) is not None: with 
tf.name_scope(self.norm.name): self.norm.build(None) if getattr(self, "perceiver_resampler", None) is not None: with tf.name_scope(self.perceiver_resampler.name): self.perceiver_resampler.build(None) if getattr(self, "decoder_layers", None) is not None: for layer in self.decoder_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "gated_cross_attn_layers", None) is not None: for layer in self.gated_cross_attn_layers: with tf.name_scope(layer.name): layer.build(None) class TFIdeficsModel(TFIdeficsPreTrainedModel): def __init__(self, config: IdeficsConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFIdeficsMainLayer(config, name="model") def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, past_key_values: list[tf.Tensor] | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, image_encoder_embeddings: tf.Tensor | None = None, perceiver_embeddings: tf.Tensor | None = None, image_attention_mask: tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = False, return_dict: bool | None = None, training: bool | None = None, ) -> TFIdeficsBaseModelOutputWithPast | tuple[tf.Tensor]: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) class TFIdeficsForVisionText2Text(TFPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_missing = [r"lm_head.weight"] _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"] config_class = IdeficsConfig def __init__(self, config, vision_model=None, **kwargs): super().__init__(config, **kwargs) self.model = TFIdeficsMainLayer(config, name="model") self.lm_head = TFIdeficsDecoupledLinear( config.hidden_size, config.vocab_size, config.additional_vocab_size, bias=False, partially_freeze=config.freeze_lm_head, name="lm_head", ) def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of IdeficsDecoupledLinear and IdeficsDecoupledEmbedding. 
""" output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(self.config, "tie_word_embeddings", True): output_embeddings.weight = input_embeddings.weight if input_embeddings.num_additional_embeddings > 0: assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings if hasattr(output_embeddings, "out_additional_features") and hasattr( input_embeddings, "num_additional_embeddings" ): output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings @unpack_inputs @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFIdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, attention_mask: tf.Tensor | None = None, position_ids: tf.Tensor | None = None, past_key_values: list[tf.Tensor] | None = None, inputs_embeds: tf.Tensor | None = None, pixel_values: tf.Tensor | None = None, image_encoder_embeddings: tf.Tensor | None = None, perceiver_embeddings: tf.Tensor | None = None, image_attention_mask: tf.Tensor | None = None, labels: tf.Tensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, interpolate_pos_encoding: bool | None = False, return_dict: bool | None = None, training=False, ) -> TFIdeficsCausalLMOutputWithPast | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >> from transformers import AutoTokenizer, TFIdeficsForVisionText2Text >> model = TFIdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b") >> tokenizer = AutoTokenizer.from_pretrained("HuggingFaceM4/idefics-9b") >> prompt = "Hey, are you consciours? Can you talk to me?" >> inputs = tokenizer(prompt, return_tensors="tf") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, image_encoder_embeddings=image_encoder_embeddings, perceiver_embeddings=perceiver_embeddings, image_attention_mask=image_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, training=training, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n if attention_mask is not None: shift_attention_mask = attention_mask[..., 1:] shift_logits = logits[..., :-1, :][shift_attention_mask != 0] shift_labels = labels[..., 1:][shift_attention_mask != 0] else: shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] # Flatten the tokens loss = self.hf_compute_loss( labels=tf.reshape(shift_labels, [-1]), logits=tf.reshape(shift_logits, [-1, shift_logits.shape[-1]]) ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return TFIdeficsCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): image_hidden_states = kwargs.pop("image_hidden_states", None) if image_hidden_states is not None: if self.config.use_resampler: kwargs["perceiver_embeddings"] = image_hidden_states else: kwargs["image_encoder_embeddings"] = image_hidden_states kwargs["pixel_values"] = None inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs) unwanted_kwargs = ["token_type_ids"] for kwarg in unwanted_kwargs: inputs.pop(kwarg, None) return inputs @staticmethod def _expand_inputs_for_generation( *args, **model_kwargs, ): return expand_inputs_for_generation(*args, **model_kwargs) @staticmethod def _update_model_kwargs_for_generation(outputs, model_kwargs, is_encoder_decoder): return update_model_kwargs_for_generation(outputs, model_kwargs) @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(tf.gather(past_state, beam_idx) for past_state in layer_past),) return reordered_past def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) __all__ = ["TFIdeficsForVisionText2Text", "TFIdeficsModel", "TFIdeficsPreTrainedModel"]
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Iterable from typing import Any, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import PaddingMode, pad, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_nested_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments, ) from ...utils import TensorType, is_vision_available, logging logger = logging.get_logger(__name__) MAX_IMAGE_SIZE = 4096 # 4k resolution as absolute maximum if is_vision_available(): import PIL from PIL import Image def _resize_output_size_rescale_to_max_len( height: int, width: int, min_len: Optional[int] = 1, max_len: Optional[int] = None ) -> tuple[int, int]: """ Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. min_len (`int`, *optional*, defaults to 1): Minimum size of the output image. max_len (`int`, *optional*, defaults to the maximum size of the image): Maximum size of the output image. Returns: The output size of the image after resizing. """ max_len = max(height, width) if max_len is None else max_len aspect_ratio = width / height if width >= height: width = max_len height = int(width / aspect_ratio) if height % 2 != 0: height += 1 elif height > width: height = max_len width = int(height * aspect_ratio) if width % 2 != 0: width += 1 # Avoid resizing to a size smaller than min_len height = max(height, min_len) width = max(width, min_len) return height, width def _resize_output_size_scale_below_upper_bound( height: int, width: int, max_len: Optional[dict[str, int]] = None ) -> tuple[int, int]: """ Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. max_len (`dict[str, int]`, *optional*, defaults to the maximum size of the image): Defines the maximum dimensions of the image. Returns: The output size of the image after resizing. 
""" max_len = max(height, width) if max_len is None else max_len aspect_ratio = width / height if width >= height and width > max_len: width = max_len height = int(width / aspect_ratio) elif height > width and height > max_len: height = max_len width = int(height * aspect_ratio) # Avoid resizing to a size smaller than 1 height = max(height, 1) width = max(width, 1) return height, width def get_resize_output_image_size( image, resolution_max_side: int, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple[int, int]: """ Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: image (`np.ndarray`): Image to resize. resolution_max_side (`int`): The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the input aspect ratio. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: The output size of the image after resizing. """ height, width = get_image_size(image, channel_dim=input_data_format) # Find the output size, when rescaling the longest edge to max_len and preserving the aspect ratio height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side) # Find the output size when scaling the image to be below the MAX_IMAGE_SIZE height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE) return height, width # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> list[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] def get_max_height_width( images_list: list[list[np.ndarray]], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> list[int]: """ Get the maximum height and width across all images in a batch. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0], num_channels=(1, 3, 4)) max_height = max_width = float("-inf") for images in images_list: for image in images: height, width = get_image_size(image, channel_dim=input_data_format) max_height = max(height, max_height) max_width = max(width, max_width) return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask( image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image, channel_dim=input_data_format) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask def convert_to_rgb( image: np.ndarray, palette: Optional[PIL.ImagePalette.ImagePalette] = None, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> ImageInput: """ Converts an image to RGB format. Args: image (`np.ndarray`): The image to convert. palette (list[int], *optional*): The palette to use if given. data_format (ChannelDimension or str, *optional*): The channel dimension format for the output image. If not provided, it will be the same as the input image. 
input_data_format (ChannelDimension or str, *optional*): The channel dimension format of the input image. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4)) # For all transformations, we want to keep the same data format as the input image unless otherwise specified. # The resized image from PIL will always have channels last, so find the input format first. data_format = input_data_format if data_format is None else data_format mode = "P" if palette is not None else None image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format) if image.mode == "P" and palette is not None: image.putpalette(palette) image_rgba = image.convert("RGBA") background = Image.new("RGBA", image_rgba.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, image_rgba) alpha_composite = alpha_composite.convert("RGB") output_array = np.array(alpha_composite) # The image is always in channels last format after converting from a PIL image output_array = to_channel_dimension_format(output_array, data_format, input_channel_dim=ChannelDimension.LAST) return output_array # FIXME Amy: make a more general crop function that isn't just centre crop def _crop( image: np.ndarray, w1: int, h1: int, w2: int, h2: int, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: if data_format is None: data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4)) if data_format == ChannelDimension.FIRST: image = image[:, h1:h2, w1:w2] elif data_format == ChannelDimension.LAST: image = image[h1:h2, w1:w2, :] else: raise ValueError("Invalid channel dimension format.") return image class Idefics3ImageProcessor(BaseImageProcessor): r""" Constructs a Idefics3 image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. This is useful if the input image is of a different format e.g. RGBA. Only has an effect if the input image is in the PIL format. do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image. The longest edge of the image is resized to be <= `size["longest_edge"]`, with the shortest edge resized to keep the input aspect ratio. size (`Dict`, *optional*, defaults to `{"longest_edge": 4 * 364}`): Controls the size of the output image. This is a dictionary containing the key "longest_edge". The image will be resized such that the longest edge is <= `size["longest_edge"]` and the shortest edge is resized to keep the input aspect ratio. resample (`Resampling`, *optional*, defaults to `Resampling.LANCZOS`): Resampling filter to use when resizing the image. do_image_splitting (`bool`, *optional*, defaults to `True`): Whether to split the image into sub-images concatenated with the original image. They are split into patches such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`. max_image_size (`Dict`, *optional*, defaults to `{"longest_edge": 364}`): Maximum resolution of the patches of images accepted by the model. This is a dictionary containing the key "longest_edge". do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image. If set to `True`, the image is rescaled to have pixel values between 0 and 1. rescale_factor (`float`, *optional*, defaults to `1/255`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. 
If set to `True`, the image is normalized to have a mean of `image_mean` and a standard deviation of `image_std`. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether or not to pad the images to the largest height and width in the batch and number of images per sample in the batch, such that the returned tensor is of shape (batch_size, max_num_images, num_channels, max_height, max_width). """ model_input_names = ["pixel_values", "pixel_attention_mask"] def __init__( self, do_convert_rgb: bool = True, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.LANCZOS, do_image_splitting: bool = True, max_image_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_pad: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) self.do_convert_rgb = do_convert_rgb self.do_resize = do_resize self.size = size if size is not None else {"longest_edge": 4 * 364} self.resample = resample self.do_image_splitting = do_image_splitting self.max_image_size = max_image_size if max_image_size is not None else {"longest_edge": 364} self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.do_pad = do_pad def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The longest edge of the image is resized to size["longest_edge"], with the shortest edge resized to keep the input aspect ratio. Can also be used with size["height"] and size["width"]. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image, num_channels=(1, 3, 4)) # For all transformations, we want to keep the same data format as the input image unless otherwise specified.
# The resized image from PIL will always have channels last, so find the input format first. data_format = input_data_format if data_format is None else data_format if "longest_edge" in size: size = get_resize_output_image_size( image, resolution_max_side=size["longest_edge"], input_data_format=input_data_format ) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("size must be a dictionary with key 'longest_edge' or 'height' and 'width'.") image_mode = None if image.ndim == 2 or image.shape[-1] == 1: image_mode = "P" image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format) resized_image = image.resize((size[1], size[0]), resample=resample) resized_image = np.array(resized_image) # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image # so we need to add it back if necessary. resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image # The image is always in channels last format after converting from a PIL image resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.LAST ) return resized_image def split_image( self, image, max_image_size: dict[str, int], resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Split an image into squares of side max_image_size and the original image resized to max_image_size. That means that a single image becomes a sequence of images. This is a "trick" to spend more compute on each image with no changes in the vision encoder. 1) If one side of the original image is larger than `max_image_size`, resize it to `max_image_size` while preserving the aspect ratio. 2) Divide the resulting image into `ceil(height / max_image_size)` x `ceil(width / max_image_size)` sub-images of the same size each (image_size, image_size). Typically, 364x364. 3) Returns the list of the crops and the original image, in addition to the number of splits for the height and the width. Args: image (`np.ndarray`): Images to split. max_image_size (`dict[str, int]`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" height, width = get_image_size(image, channel_dim=input_data_format) max_height = max_width = max_image_size["longest_edge"] frames = [] if height > max_height or width > max_width: # Calculate the number of splits num_splits_h = math.ceil(height / max_height) num_splits_w = math.ceil(width / max_width) # Calculate the optimal width and height for the sub-images optimal_height = math.ceil(height / num_splits_h) optimal_width = math.ceil(width / num_splits_w) # Iterate through each row and column for r in range(num_splits_h): for c in range(num_splits_w): # Calculate the starting point of the crop start_x = c * optimal_width start_y = r * optimal_height # Calculate the ending point of the crop end_x = min(start_x + optimal_width, width) end_y = min(start_y + optimal_height, height) # Crop the image cropped_image = _crop( image, start_x, start_y, end_x, end_y, data_format=data_format, ) frames.append(cropped_image) # For the global image at the end, we resize it to match the max_image_size, for cpu memory efficiency global_image_height, global_image_width = max_height, max_width if height != global_image_height or width != global_image_width: image = self.resize( image, {"height": global_image_height, "width": global_image_width}, resample=resample, input_data_format=data_format, ) else: num_splits_h, num_splits_w = 0, 0 frames.append(image) return frames, num_splits_h, num_splits_w def resize_for_vision_encoder( self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling = PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio. Args: image (`np.ndarray`): Images to resize. vision_encoder_max_size (`int`): Maximum size of the output image. If the image is larger than this size, it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`): Resampling filter to use when resizing the image. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the output image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred """ height, width = get_image_size(image, channel_dim=input_data_format) aspect_ratio = width / height if width >= height: width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size height = int(width / aspect_ratio) height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size elif height > width: height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size width = int(height * aspect_ratio) width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size new_size = {"height": height, "width": width} return self.resize( image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format ) def _pad_image( self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. 
""" input_height, input_width = get_image_size(image, channel_dim=input_data_format) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) return padded_image def pad( self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> BatchFeature: """ For a list of images, for each images, pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width. For each sample in the batch, pads the sample with empty images to the max_number of images per sample in the batch. Optionally returns a pixel mask. Args: images (`list[np.ndarray]`): List of list of images to pad. Pads to the largest height and width in the batch. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
""" pad_size = get_max_height_width(images, input_data_format=input_data_format) batch_size = len(images) max_num_images = max(len(images_) for images_ in images) input_data_format = ( infer_channel_dimension_format(images[0][0], num_channels=(1, 3, 4)) if input_data_format is None else input_data_format ) data_format = input_data_format if data_format is None else data_format if input_data_format == ChannelDimension.FIRST: n_channels = images[0][0].shape[0] elif input_data_format == ChannelDimension.LAST: n_channels = images[0][0].shape[-1] else: raise ValueError("Invalid channel dimension format.") def empty_image(size, input_data_format): if input_data_format == ChannelDimension.FIRST: return np.zeros((n_channels, *size), dtype=np.uint8) elif input_data_format == ChannelDimension.LAST: return np.zeros((*size, n_channels), dtype=np.uint8) padded_images_list = [ [empty_image(pad_size, data_format) for _ in range(max_num_images)] for _ in range(batch_size) ] padded_masks = [[np.zeros(pad_size, dtype=np.int64) for _ in range(max_num_images)] for _ in range(batch_size)] for batch_idx in range(batch_size): for sample_idx, image in enumerate(images[batch_idx]): padded_images_list[batch_idx][sample_idx] = self._pad_image( image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, ) padded_masks[batch_idx][sample_idx] = make_pixel_mask( image, output_size=pad_size, input_data_format=input_data_format ) padded_masks = padded_masks if return_pixel_mask else None return padded_images_list, padded_masks def preprocess( self, images: ImageInput, do_convert_rgb: Optional[bool] = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_image_splitting: Optional[bool] = None, do_rescale: Optional[bool] = None, max_image_size: Optional[dict[str, int]] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_pad: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_row_col_info: bool = False, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Preprocess a batch of images. Args: images (`ImageInput`): A list of images to preprocess. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. With the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_image_splitting (`bool`, *optional*, defaults to `self.do_image_splitting`): Whether to split the image into sub-images concatenated with the original image. They are split into patches such that each patch has a size of `max_image_size["height"]` x `max_image_size["width"]`. max_image_size (`Dict`, *optional*, defaults to `self.max_image_size`): Maximum resolution of the images. If the image is larger than this size, the image is split into patches. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. 
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether or not to pad the images to the largest height and width in the batch. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. return_row_col_info (`bool`, *optional*, default to `False`): Whether to return the number of rows and columns of the split images. This is used for the `Idefics3Processor` to generate prompt strings based on the number of rows and columns. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_image_splitting = do_image_splitting if do_image_splitting is not None else self.do_image_splitting max_image_size = max_image_size if max_image_size is not None else self.max_image_size do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb do_pad = do_pad if do_pad is not None else self.do_pad images_list = make_nested_list_of_images(images) if not valid_images(images_list[0]): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) # save the palettes for conversion to RGB palettes_list = [ [im.getpalette() if isinstance(im, Image.Image) and im.mode == "P" else None for im in images] for images in images_list ] # All transformations expect numpy arrays. images_list = [[to_numpy_array(image) for image in images] for images in images_list] # Extra channel dimension for grayscale images if input_data_format in [ChannelDimension.LAST, None]: images_list = [ [np.expand_dims(img, axis=-1) if img.ndim == 2 else img for img in images] for images in images_list ] elif input_data_format == ChannelDimension.FIRST: images_list = [ [np.expand_dims(img, axis=0) if img.ndim == 2 else img for img in images] for images in images_list ] if do_rescale and is_scaled_image(images_list[0][0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) # We assume that all images have the same channel dimension format. if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0], num_channels=(1, 3, 4)) if do_resize: images_list = [ [ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images ] for images in images_list ] if do_image_splitting: # We first resize both height and width of each image to the nearest max_image_size multiple, disregarding the aspect ratio # for size=(10, max_image_size) -> rescaled_size=(max_image_size, max_image_size) # for size=(11, max_image_size+1) -> rescaled_size=(max_image_size, max_image_size*2) images_list = [ [ self.resize_for_vision_encoder( image, max_image_size["longest_edge"], resample=resample, input_data_format=input_data_format ) for image in images ] for images in images_list ] images_list_split_arrays = [] palettes_list_split_arrays = [] images_list_rows = [] images_list_cols = [] for images, palettes in zip(images_list, palettes_list): split_image_arrays = [] split_palettes_arrays = [] image_rows = [] image_cols = [] for image, palette in zip(images, palettes): split_image_array, rows, cols = self.split_image( image, max_image_size=max_image_size, resample=resample, input_data_format=input_data_format, ) split_image_arrays.extend(split_image_array) split_palettes_arrays.extend([palette] * len(split_image_array)) image_rows.append(rows) image_cols.append(cols) images_list_split_arrays.append(split_image_arrays) palettes_list_split_arrays.append(split_palettes_arrays) images_list_rows.append(image_rows) images_list_cols.append(image_cols) images_list = images_list_split_arrays palettes_list = palettes_list_split_arrays else: # We square the images to max_image_size images_list = [ [ self.resize( image=image, size={"height": max_image_size["longest_edge"], "width": max_image_size["longest_edge"]}, resample=resample, input_data_format=input_data_format, ) for image in images ] for images in images_list ] images_list_rows = [[0] * len(images) for images in images_list] images_list_cols = [[0] * len(images) for images in images_list] if do_convert_rgb: images_list = [ [convert_to_rgb(img, palette) for img, palette in zip(images, palettes)] for images, palettes in zip(images_list, palettes_list) ] if do_rescale: images_list = [ [self.rescale(image, 
rescale_factor, input_data_format=input_data_format) for image in images] for images in images_list ] if do_normalize: images_list = [ [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] for images in images_list ] pixel_attention_mask = None if do_pad: images_list, pixel_attention_mask = self.pad( images_list, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=input_data_format ) if data_format is not None: images_list = [ [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] for images in images_list ] # Faster tensor conversion data = {"pixel_values": np.array(images_list) if do_pad and return_tensors is not None else images_list} if pixel_attention_mask is not None: data["pixel_attention_mask"] = ( np.array(pixel_attention_mask) if do_pad and return_tensors is not None else pixel_attention_mask ) encoding = BatchFeature(data=data, tensor_type=return_tensors) # This is needed for generating correct text inputs in the processor - we don't pad to the max number of images if return_row_col_info: encoding["rows"] = images_list_rows encoding["cols"] = images_list_cols return encoding def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): """ A utility that returns number of image patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image. """ do_image_splitting = images_kwargs.get("do_image_splitting", self.do_image_splitting) max_image_size = images_kwargs.get("max_image_size", self.max_image_size) size = images_kwargs.get("size", self.size) num_patches = num_rows = num_cols = 1 if do_image_splitting: height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=size["longest_edge"]) height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=4096) aspect_ratio = width / height if width >= height: resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"] resized_height = int(width / aspect_ratio) resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"] elif height > width: resized_height = math.ceil(height / max_image_size["longest_edge"]) * max_image_size["longest_edge"] resized_width = int(height * aspect_ratio) resized_width = math.ceil(width / max_image_size["longest_edge"]) * max_image_size["longest_edge"] max_height = max_width = max_image_size["longest_edge"] if resized_height > max_height or resized_width > max_width: # Calculate the number of splits num_rows = math.ceil(resized_height / max_height) num_cols = math.ceil(resized_width / max_width) num_patches = num_rows * num_cols + 1 return num_patches, num_rows, num_cols __all__ = ["Idefics3ImageProcessor"]
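

# A small worked example (illustrative only) of the splitting arithmetic implemented by
# `split_image` and `get_number_of_image_patches` above, with the default
# `max_image_size = {"longest_edge": 364}`: the image is cut into ceil(h / 364) rows and
# ceil(w / 364) columns of crops, plus one global copy resized to 364 x 364.
if __name__ == "__main__":
    import math

    height, width = 1092, 728  # already multiples of 364, so no extra resizing is needed
    longest_edge = 364
    num_rows = math.ceil(height / longest_edge)  # 3
    num_cols = math.ceil(width / longest_edge)  # 2
    num_patches = num_rows * num_cols + 1  # 6 crops + 1 global image = 7
    print(num_rows, num_cols, num_patches)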
# coding=utf-8 # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from collections.abc import Iterable from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch from torch import nn from transformers.models.blip.image_processing_blip import BlipImageProcessor from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import ClassifierFreeGuidanceLogitsProcessor, GenerationMixin, GenerationMode, LogitsProcessorList from ...generation.utils import GenerateDecoderOnlyOutput from ...image_processing_utils import BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, ) from ...modeling_outputs import ModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ( TransformersKwargs, auto_docstring, can_return_tuple, is_torch_available, is_vision_available, logging, ) from ..auto import AutoModel from ..blip_2.modeling_blip_2 import Blip2VisionModel from ..chameleon.configuration_chameleon import ChameleonVQVAEConfig from ..chameleon.modeling_chameleon import ( ChameleonVQVAE, ChameleonVQVAEEncoderAttnBlock, ChameleonVQVAEEncoderConvDownsample, ChameleonVQVAEEncoderResnetBlock, ChameleonVQVAEVectorQuantizer, ) from ..idefics.modeling_idefics import IdeficsBaseModelOutputWithPast, IdeficsCausalLMOutputWithPast from ..llama.modeling_llama import eager_attention_forward from ..siglip.configuration_siglip import SiglipVisionConfig from ..siglip.modeling_siglip import SiglipEncoder, SiglipEncoderLayer, SiglipVisionEmbeddings if is_torch_available(): import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint if is_vision_available(): import PIL from ...configuration_utils import PretrainedConfig from ..auto import CONFIG_MAPPING, AutoConfig logger = logging.get_logger(__name__) # General docstring class JanusVisionConfig(SiglipVisionConfig): r""" This is the configuration class to store the configuration of a [`JanusVisionModel`]. It is used to instantiate a `JanusVisionModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. 
num_channels (`int`, *optional*, defaults to 3): The number of input channels. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. attention_dropout (`float`, *optional*, defaults to 0.0): Dropout probability for attention weights. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, and `"gelu_new"` are supported. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. attention_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys, and values in the attention layers. hidden_dropout_rate (`float`, *optional*, defaults to 0.0): The dropout probability for fully connected layers in the encoder. projection_dim (`int`, *optional*, defaults to 2048): Dimensionality of the MLP projection head. projection_dropout (`float`, *optional*, defaults to 0.0): Dropout probability for the projection layer. use_qk_norm (`bool`, *optional*, defaults to `False`): Whether to normalize the query and key matrices. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal initializer for initializing all weight matrices. depth (`int`, *optional*, defaults to 2): Number of hidden layers in the aligner module. num_image_tokens (`int`, *optional*, defaults to 576): Number of image tokens. """ model_type = "janus_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, num_channels=3, patch_size=16, image_size=384, attention_dropout=0.0, layer_norm_eps=1e-6, hidden_act="gelu", mlp_ratio=4.0, attention_bias=True, hidden_dropout_rate=0.0, projection_dim=2048, projection_dropout=0.0, use_qk_norm=False, initializer_range=0.02, depth=2, num_image_tokens=576, **kwargs, ): super().__init__( hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_channels=num_channels, patch_size=patch_size, image_size=image_size, attention_dropout=attention_dropout, layer_norm_eps=layer_norm_eps, hidden_act=hidden_act, **kwargs, ) del self.intermediate_size self.mlp_ratio = mlp_ratio self.attention_bias = attention_bias self.hidden_dropout_rate = hidden_dropout_rate self.projection_dim = projection_dim self.projection_dropout = projection_dropout self.use_qk_norm = use_qk_norm self.initializer_range = initializer_range self.depth = depth self.num_image_tokens = num_image_tokens class JanusVQVAEConfig(ChameleonVQVAEConfig): r""" This is the configuration class to store the configuration of a [`JanusVQVAEModel`]. It is used to instantiate a `JanusVQVAEModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to the VQModel of the [deepseek-community/Janus-Pro-1B](https://huggingface.co/deepseek-community/Janus-Pro-1B). Args: embed_dim (`int`, *optional*, defaults to 8): Dimensionality of each embedding vector. 
num_embeddings (`int`, *optional*, defaults to 16384): Number of codebook embeddings. double_latent (`bool`, *optional*, defaults to `False`): Whether to use double z channels. latent_channels (`int`, *optional*, defaults to 256): Number of channels for the latent space. num_patches (`int`, *optional*, defaults to 32): Num of patches the input images can be divided into. in_channels (`int`, *optional*, defaults to 3): Number of input channels. out_channels (`int`, *optional*, defaults to 3): Number of out channels. base_channels (`int`, *optional*, defaults to 128): Base channel count. channel_multiplier (`list[int]`, *optional*, defaults to `[1, 1, 2, 2, 4]`): Channel multipliers for each resolution. num_res_blocks (`int`, *optional*, defaults to 2): Number of residual blocks. dropout (`float`, *optional*, defaults to 0.0): Dropout rate. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. projection_dim (`int`, *optional*, defaults to 2048): Dimensionality of the MLP projection head. num_hidden_layers (`int`, *optional*, defaults to 2): Number of hidden layers in VAVAE MLP Connecter module. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. image_token_embed_dim (`int`, *optional*, defaults to 2048): Dimension of image embeddings. It should be same as the dimensionality of text embeddings. """ def __init__( self, embed_dim: int = 8, num_embeddings: int = 16384, double_latent: bool = False, latent_channels: int = 256, num_patches: int = 32, in_channels: int = 3, out_channels: int = 3, base_channels: int = 128, channel_multiplier: list[int] = [1, 1, 2, 2, 4], num_res_blocks: int = 2, dropout: float = 0.0, initializer_range=0.02, projection_dim=2048, num_hidden_layers=2, hidden_act="gelu", image_token_embed_dim=2048, **kwargs, ): super().__init__( embed_dim=embed_dim, num_embeddings=num_embeddings, double_latent=double_latent, latent_channels=latent_channels, in_channels=in_channels, base_channels=base_channels, channel_multiplier=channel_multiplier, num_res_blocks=num_res_blocks, dropout=dropout, initializer_range=initializer_range, **kwargs, ) self.num_patches = num_patches self.out_channels = out_channels self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.hidden_act = hidden_act self.image_token_embed_dim = image_token_embed_dim del self.resolution del self.attn_resolutions del self.attn_type class JanusConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`JanusModel`]. It is used to instantiate an Janus model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Janus-1B or Janus-7B models. e.g. [deepseek-community/Janus-Pro-1B](https://huggingface.co/deepseek-community/Janus-Pro-1B) or [deepseek-community/Janus-Pro-7B](https://huggingface.co/deepseek-community/Janus-Pro-7B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `LlamaConfig`): The config object or dictionary of the text backbone. 
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `JanusVisionConfig`): The config object or dictionary of the vision backbone. vq_config (`Union[AutoConfig, dict]`, *optional*, defaults to `JanusVQVAEConfig`): The config object or dictionary of the VQVAE backbone. image_token_id (`int`, *optional*, defaults to 100581): Token index of a placeholder image token. Example: ```python >>> from transformers import JanusForConditionalGeneration, JanusConfig, JanusVisionConfig, JanusVQVAEConfig, LlamaConfig >>> # Initializing a Janus vision config >>> vision_config = JanusVisionConfig() >>> # Initializing a Llama config >>> text_config = LlamaConfig() >>> # Initializing a VQ config >>> vq_config = JanusVQVAEConfig() >>> # Initializing a Janus Pro 1B style configuration >>> configuration = JanusConfig(vision_config=vision_config, text_config=text_config, vq_config=vq_config) >>> # Initializing a model from the Janus Pro 1B style configuration >>> model = JanusForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "janus" sub_configs = { "text_config": AutoConfig, "vision_config": JanusVisionConfig, "vq_config": JanusVQVAEConfig, } def __init__( self, text_config=None, vision_config=None, vq_config=None, image_token_id=100581, **kwargs, ): if isinstance(text_config, dict): text_config["model_type"] = text_config.get("model_type", "llama") self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: logger.info("`text_config` is None. Initializing with default values") self.text_config = CONFIG_MAPPING["llama"]() elif isinstance(text_config, PretrainedConfig): self.text_config = text_config else: raise ValueError( f"Invalid type for `text_config`. Must be either `dict` or `LlamaConfig`." f" Type found: {type(text_config)}" ) if vision_config is None: logger.info("`vision_config` is None. Initializing with default JanusVisionConfig values") self.vision_config = JanusVisionConfig() elif isinstance(vision_config, dict): self.vision_config = JanusVisionConfig(**vision_config) elif isinstance(vision_config, JanusVisionConfig): self.vision_config = vision_config else: raise ValueError( f"Invalid type for `vision_config`. Must be either `dict` or `JanusVisionConfig`." f" Type found: {type(vision_config)}" ) if vq_config is None: logger.info("`vq_config` is None. Initializing with default JanusVQVAEConfig values") self.vq_config = JanusVQVAEConfig() elif isinstance(vq_config, dict): self.vq_config = JanusVQVAEConfig(**vq_config) elif isinstance(vq_config, JanusVQVAEConfig): self.vq_config = vq_config else: raise ValueError( f"Invalid type for `vq_config`. Must be either `dict` or `JanusVQVAEConfig`." f" Type found: {type(vq_config)}" ) self.initializer_range = self.vision_config.initializer_range # This dimension is required when decoding discrete image tokens to continuous input. 
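        # With the default configs this is 384 // 16 = 24, i.e. a 24 x 24 grid of
        # 576 discrete tokens per image (matching `JanusVisionConfig.num_image_tokens`).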
self.vq_config.num_patches = self.vision_config.image_size // self.vision_config.patch_size # The default is only the index for the 1B model, 7B uses a different one self.image_token_id = image_token_id super().__init__(**kwargs) @auto_docstring class JanusPreTrainedModel(PreTrainedModel): config: JanusConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer", "JanusVisionEncoderLayer"] _skip_keys_device_placement = ["past_key_values", "causal_mask"] _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_param_buffer_assignment = False @dataclass @auto_docstring( custom_intro=""" Base class for Janus VQ-VAE mode model outputs. """ ) class JanusVQVAEOutput(ModelOutput): r""" decoded_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): Reconstructed pixel values after encoding and decoding the input. embedding_loss (`torch.FloatTensor`): Embedding loss. """ decoded_pixel_values: Optional[torch.FloatTensor] = None embedding_loss: torch.FloatTensor = None class JanusBaseModelOutputWithPast(IdeficsBaseModelOutputWithPast): pass class JanusCausalLMOutputWithPast(IdeficsCausalLMOutputWithPast): pass class JanusVisionEmbeddings(SiglipVisionEmbeddings): def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: _, _, height, width = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] embeddings = patch_embeds.flatten(2).transpose(1, 2) if interpolate_pos_encoding: pos_embeds = self.interpolate_pos_encoding(embeddings, height, width) else: pos_embeds = self.position_embedding(self.position_ids) embeddings = embeddings + pos_embeds return embeddings class JanusVisionAttention(nn.Module): """Attention Class for Janus Vision Encoder""" def __init__(self, config: JanusVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout proj_dropout = config.projection_dropout qk_norm = config.use_qk_norm self.is_causal = False # Janus has no MHA, hence for `eager_attention_forward` call setting `num_key_value_groups` to 1. 
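        # (`eager_attention_forward` repeats the key/value heads `num_key_value_groups`
        # times to emulate grouped-query attention, so a value of 1 leaves them untouched.)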
self.num_key_value_groups = 1 self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias) self.projection_layer = nn.Linear(self.embed_dim, self.embed_dim) self.projection_dropout = nn.Dropout(proj_dropout) if proj_dropout > 0 else nn.Identity() self.q_norm = nn.LayerNorm(self.embed_dim) if qk_norm else nn.Identity() self.k_norm = nn.LayerNorm(self.embed_dim) if qk_norm else nn.Identity() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ): batch_size, seq_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) query_states = self.q_norm(query_states) key_states = key_states.reshape(-1, self.num_heads, self.head_dim) key_states = self.k_norm(key_states) query_states = query_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scale, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape(batch_size, seq_len, self.embed_dim) output = self.projection_layer(attn_output) output = self.projection_dropout(output) return output, attn_weights class JanusVisionMLP(nn.Module): def __init__(self, config: JanusVisionConfig): super().__init__() self.config = config self.intermediate_size = int(config.hidden_size * config.mlp_ratio) self.activation_fn = ACT2FN[config.hidden_act] # Gelu act self.fc1 = nn.Linear(config.hidden_size, self.intermediate_size) self.fc2 = nn.Linear(self.intermediate_size, config.hidden_size) self.dropout1 = nn.Dropout(config.hidden_dropout_rate) self.dropout2 = nn.Dropout(config.hidden_dropout_rate) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.dropout1(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout2(hidden_states) return hidden_states class JanusVisionEncoderLayer(SiglipEncoderLayer): def __init__(self, config: JanusVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.self_attn = JanusVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = JanusVisionMLP(config) class JanusVisionEncoder(SiglipEncoder): def __init__(self, config: JanusVisionConfig): super().__init__(config) self.layers = nn.ModuleList([JanusVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) class JanusVisionModel(Blip2VisionModel): def __init__(self, config: 
JanusVisionConfig): super().__init__(config) self.encoder = JanusVisionEncoder(config) class JanusVisionAlignerMLP(nn.Module): def __init__(self, config: JanusVisionConfig): super().__init__() self.fc1 = nn.Linear(config.hidden_size, config.projection_dim) self.hidden_layers = nn.ModuleList( [nn.Linear(config.projection_dim, config.projection_dim) for _ in range(1, config.depth)] ) self.activation_fn = ACT2FN[config.hidden_act] def forward(self, hidden_states): hidden_states = self.fc1(hidden_states) for layer in self.hidden_layers: hidden_states = self.activation_fn(hidden_states) hidden_states = layer(hidden_states) return hidden_states class JanusVQVAEVectorQuantizer(ChameleonVQVAEVectorQuantizer): def __init__(self, config: JanusVQVAEConfig): super().__init__(config) self.quant_state_dims = [config.num_patches] * 2 def get_codebook_entry(self, image_tokens: torch.LongTensor) -> torch.FloatTensor: batch_size = image_tokens.shape[0] emb_dim: int = self.embedding.weight.shape[-1] # get quantized latent vectors hidden_state_quant = self.embedding(image_tokens) # l2 normalization on the last dimension hidden_state_quant = F.normalize(hidden_state_quant, p=2, dim=-1) # reshape back to match original input shape hidden_state_quant = hidden_state_quant.view((batch_size, *self.quant_state_dims, emb_dim)) hidden_state_quant = hidden_state_quant.permute(0, 3, 1, 2).contiguous() return hidden_state_quant class JanusVQVAEResnetBlock(ChameleonVQVAEEncoderResnetBlock): pass class JanusVQVAEAttnBlock(ChameleonVQVAEEncoderAttnBlock): pass class JanusVQVAEConvDownsample(ChameleonVQVAEEncoderConvDownsample): pass class JanusVQVAEConvUpsample(nn.Module): def __init__(self, in_channels): super().__init__() self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_states): hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest") hidden_states = self.conv(hidden_states) return hidden_states class JanusVQVAEMidBlock(nn.Module): def __init__(self, config: JanusVQVAEConfig, channels: int): super().__init__() self.block_1 = JanusVQVAEResnetBlock( config=config, in_channels=channels, out_channels=channels, ) self.attn_1 = JanusVQVAEAttnBlock(channels) self.block_2 = JanusVQVAEResnetBlock( config=config, in_channels=channels, out_channels=channels, ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.block_1(hidden_states) hidden_states = self.attn_1(hidden_states) hidden_states = self.block_2(hidden_states) return hidden_states class JanusVQVAEEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_resolutions = len(config.channel_multiplier) self.num_res_blocks = config.num_res_blocks base_channels = config.base_channels in_channels = config.in_channels double_latent = config.double_latent latent_channels = config.latent_channels channel_multiplier = config.channel_multiplier self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1) in_channel_multiplier = (1,) + tuple(channel_multiplier) self.in_channel_multiplier = in_channel_multiplier self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() block_in = base_channels * in_channel_multiplier[i_level] block_out = base_channels * channel_multiplier[i_level] for i_block in range(self.num_res_blocks): block.append( JanusVQVAEResnetBlock( config=config, in_channels=block_in, out_channels=block_out, ) ) block_in = block_out if i_level == 
self.num_resolutions - 1: attn.append(JanusVQVAEAttnBlock(block_in)) down = nn.Module() down.block = block down.attn = attn if i_level != self.num_resolutions - 1: down.downsample = JanusVQVAEConvDownsample(block_in) self.down.append(down) self.mid = JanusVQVAEMidBlock(config, block_in) self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True) self.conv_out = torch.nn.Conv2d( block_in, 2 * latent_channels if double_latent else latent_channels, kernel_size=3, stride=1, padding=1, ) def forward(self, pixel_values: torch.LongTensor): # downsampling hidden_states = [self.conv_in(pixel_values)] for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks): hidden_state = self.down[i_level].block[i_block]( hidden_states[-1], ) if len(self.down[i_level].attn) > 0: hidden_state = self.down[i_level].attn[i_block](hidden_state) hidden_states.append(hidden_state) if i_level != self.num_resolutions - 1: hidden_states.append(self.down[i_level].downsample(hidden_states[-1])) # middle last_hidden_state = hidden_states[-1] last_hidden_state = self.mid(last_hidden_state) # end last_hidden_state = self.norm_out(last_hidden_state) last_hidden_state *= torch.sigmoid(last_hidden_state) last_hidden_state = self.conv_out(last_hidden_state) return last_hidden_state class JanusVQVAEDecoder(nn.Module): def __init__(self, config): super().__init__() self.num_resolutions = len(config.channel_multiplier) self.num_res_blocks = config.num_res_blocks base_channels = config.base_channels latent_channels = config.latent_channels out_channels = config.out_channels # compute in_ch_mult, block_in and curr_res at lowest res block_in = base_channels * config.channel_multiplier[self.num_resolutions - 1] # z to block_in self.conv_in = torch.nn.Conv2d(latent_channels, block_in, kernel_size=3, stride=1, padding=1) # middle self.mid = JanusVQVAEMidBlock(config, block_in) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() block_out = base_channels * config.channel_multiplier[i_level] for i_block in range(self.num_res_blocks + 1): block.append( JanusVQVAEResnetBlock( config=config, in_channels=block_in, out_channels=block_out, ) ) block_in = block_out if i_level == self.num_resolutions - 1: attn.append(JanusVQVAEAttnBlock(block_in)) up = nn.Module() up.block = block up.attn = attn if i_level != 0: up.upsample = JanusVQVAEConvUpsample(block_in) self.up.append(up) # end self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True) self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1) def forward(self, hidden_state: torch.FloatTensor) -> torch.FloatTensor: hidden_state = self.conv_in(hidden_state) # middle hidden_state = self.mid(hidden_state) # upsampling for i_level in range(self.num_resolutions): for i_block in range(self.num_res_blocks + 1): hidden_state = self.up[i_level].block[i_block](hidden_state) if len(self.up[i_level].attn) > 0: hidden_state = self.up[i_level].attn[i_block](hidden_state) if i_level != self.num_resolutions - 1: hidden_state = self.up[i_level].upsample(hidden_state) hidden_state = self.norm_out(hidden_state) hidden_state *= torch.sigmoid(hidden_state) hidden_state = self.conv_out(hidden_state) return hidden_state class JanusVQVAE(ChameleonVQVAE): _no_split_modules = [ "JanusVQVAEAttnBlock", "JanusVQVAEResnetBlock", "JanusVQVAEVectorQuantizer", ] main_input_name = "pixel_values" def 
__init__(self, config: JanusVQVAEConfig):
        super().__init__(config)
        self.decoder = JanusVQVAEDecoder(config)
        self.gradient_checkpointing = False

        # Initialize the VQVAE model.
        self.post_init()

    def decode(self, image_tokens: torch.LongTensor) -> torch.FloatTensor:
        """
        Decodes quantized token IDs into pixel values.

        Args:
            image_tokens (torch.LongTensor): Batch of token IDs.

        Returns:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
                Pixel values decoded from the token IDs.
        """
        if image_tokens.shape[1] != self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]:
            raise ValueError(
                f"Expected `image_tokens` to have shape `(batch_size, {self.quantize.quant_state_dims[0] * self.quantize.quant_state_dims[1]})`, "
                f"but got shape `{image_tokens.shape}`."
            )
        codebook_entry = self.quantize.get_codebook_entry(image_tokens)
        hidden_states = self.post_quant_conv(codebook_entry)
        pixel_values = self.decoder(hidden_states)
        return pixel_values

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor]:
        batch_size = pixel_values.shape[0]
        quant, embedding_loss, indices = self.encode(pixel_values)
        decoded_pixel_values = self.decode(indices.view(batch_size, -1))
        return JanusVQVAEOutput(decoded_pixel_values, embedding_loss)


class JanusVQVAEAlignerMLP(nn.Module):
    def __init__(self, config: JanusVQVAEConfig):
        super().__init__()

        self.fc1 = nn.Linear(config.embed_dim, config.projection_dim)
        self.hidden_layers = nn.ModuleList(
            [nn.Linear(config.projection_dim, config.projection_dim) for _ in range(1, config.num_hidden_layers)]
        )
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.fc1(hidden_states)
        for layer in self.hidden_layers:
            hidden_states = self.activation_fn(hidden_states)
            hidden_states = layer(hidden_states)
        return hidden_states


class JanusVQVAEHead(nn.Module):
    """Head used for sampling tokens in image generation, replacing the usual lm head."""

    def __init__(self, config: JanusVQVAEConfig):
        super().__init__()
        self.proj_out = nn.Linear(config.image_token_embed_dim, config.projection_dim)
        self.activation_fn = ACT2FN[config.hidden_act]
        self.vision_head = nn.Linear(config.projection_dim, config.num_embeddings)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.proj_out(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.vision_head(hidden_states)
        return hidden_states


@auto_docstring(
    custom_intro="""
    The Janus model, which consists of a SigLIP vision backbone, a Llama language model and a VQ model.
    """
)
class JanusModel(JanusPreTrainedModel):
    def __init__(self, config: JanusConfig):
        super().__init__(config)
        self.config = config
        # This is necessary for backward compatibility, see SiglipModel initialization
        self.vision_model = JanusVisionModel._from_config(config.vision_config)
        self.aligner = JanusVisionAlignerMLP(self.vision_model.config)

        self.vqmodel = JanusVQVAE._from_config(config.vq_config)

        # Below generation_* modules are used for image generation.
        # Embeddings used for image generation, instead of Janus vision embeddings.
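        # `generation_embeddings` looks up VQ codebook ids (16384 entries of width 8 by
        # default) and `generation_aligner` projects them up to the language model width,
        # so sampled image tokens can be fed back as `inputs_embeds` at the next step.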
        self.generation_embeddings = nn.Embedding(self.vqmodel.config.num_embeddings, self.vqmodel.config.embed_dim)
        self.generation_aligner = JanusVQVAEAlignerMLP(self.vqmodel.config)
        self.generation_head = JanusVQVAEHead(self.vqmodel.config)

        self.language_model = AutoModel.from_config(config=config.text_config)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_image_features(self, pixel_values):
        image_embeds = self.vision_model(pixel_values)
        image_embeds = self.aligner(image_embeds.last_hidden_state)
        return image_embeds

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder
        token count is equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id

        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        if inputs_embeds[special_image_mask].numel() != image_features.numel():
            n_image_features = image_features.shape[0] * image_features.shape[1]
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
            )
        return special_image_mask

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ):
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
            )

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            image_embeds = self.get_image_features(pixel_values)
            image_features = image_embeds.reshape(-1, inputs_embeds.shape[-1])
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            image_attention_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(image_attention_mask, image_features)

        lm_output = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        return JanusBaseModelOutputWithPast(
            last_hidden_state=lm_output.last_hidden_state,
            past_key_values=lm_output.past_key_values,
            hidden_states=lm_output.hidden_states,
            attentions=lm_output.attentions,
            image_hidden_states=image_embeds if pixel_values is not None else None,
        )


class
JanusForConditionalGeneration(JanusPreTrainedModel, GenerationMixin): _tied_weights_keys = ["model.language_model.embed_tokens.weight", "lm_head.weight"] _can_compile_fullgraph = True def __init__(self, config: JanusConfig): super().__init__(config) self.config = config self.model = JanusModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) # Initialize weights and apply final processing. self.post_init() def get_input_embeddings(self): return self.model.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.model.language_model.set_input_embeddings(value) def prepare_embeddings_for_image_generation(self, inputs: torch.Tensor) -> torch.Tensor: hidden_state = self.model.generation_embeddings(inputs) hidden_state = self.model.generation_aligner(hidden_state) return hidden_state def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
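
        Example (an illustrative sketch rather than the model's canonical example; the checkpoint name follows
        the config docstrings above, while the `<image_placeholder>` prompt format is an assumption):

        ```python
        >>> from PIL import Image
        >>> from transformers import AutoProcessor, JanusForConditionalGeneration

        >>> model = JanusForConditionalGeneration.from_pretrained("deepseek-community/Janus-Pro-1B")
        >>> processor = AutoProcessor.from_pretrained("deepseek-community/Janus-Pro-1B")

        >>> image = Image.open("my_image.png")
        >>> prompt = "<image_placeholder>\nDescribe this image."
        >>> inputs = processor(images=image, text=prompt, return_tensors="pt")

        >>> generated_ids = model.generate(**inputs, max_new_tokens=50)
        >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
        ```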
""" outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return JanusCausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) def prepare_inputs_for_generation( self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, logits_to_keep=None, **kwargs, ): # Overwritten -- extra custom processing model_inputs = super().prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, ) # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore # Otherwise we need pixel values to be passed to model if cache_position[0] == 0: model_inputs["pixel_values"] = pixel_values return model_inputs def decode_image_tokens(self, image_tokens: torch.Tensor): """ Decodes generated image tokens from language model to continuous pixel values with VQGAN module via upsampling. Args: image_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`): The tensors corresponding to the input images. """ decoded_image = self.model.vqmodel.decode(image_tokens) decoded_image = decoded_image.permute(0, 2, 3, 1) return decoded_image @torch.no_grad def generate( self, inputs: torch.Tensor = None, attention_mask: Optional[torch.LongTensor] = None, logits_processor: Optional[LogitsProcessorList] = None, **kwargs, ): # 1. Handle generation config and model kwargs generation_config = kwargs.pop("generation_config", self.generation_config) generation_config = copy.deepcopy(generation_config) # Default to "text" generation if mode isn't provided generation_mode = kwargs.pop("generation_mode", "text") if generation_mode == "text": # Set guidance_scale=None to prevent running UnbatchedCFG processor. return super().generate( inputs=inputs, attention_mask=attention_mask, generation_config=generation_config, guidance_scale=None, **kwargs, ) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs # Validate generation mode if generation_config.get_generation_mode() not in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH): raise ValueError( "Got incompatible mode for Image Generation, should be one of greedy or sampling. " "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`." ) # Validate the configuration and model kwargs generation_config.validate() self._validate_model_kwargs(model_kwargs.copy()) # 2. Initialize logit processors logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() # Set `use_cache=True` as we will be using input embeds for generation. 
model_kwargs["use_cache"] = True if generation_config.guidance_scale is None: logger.warning("`guidance_scale` is required for CFG but not provided. Setting to default value of 5.") generation_config.guidance_scale = 5 model_kwargs["guidance_scale"] = generation_config.guidance_scale # 3. Prepare model inputs input_ids, model_input_name, model_kwargs = self._prepare_model_inputs( inputs, generation_config.bos_token_id, model_kwargs ) dtype, device = input_ids.dtype, input_ids.device if len(input_ids.shape) != 2: raise ValueError( f"Expected input ids of shape (batch_size, seq_len), but got {input_ids.shape}" "Passing `inputs embeds` is not supported currently." ) # Prepare special tokens which will be used generate internally. kwargs_has_attention_mask = attention_mask is not None self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device) # 4. Add CFG processor along with user passed logit processor. if generation_config.guidance_scale and generation_config.guidance_scale > 1: logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale)) generation_config.guidance_scale = None # Reset to prevent processor duplication. # 5. Prepare logits processor logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids.shape[1], encoder_input_ids=input_ids, prefix_allowed_tokens_fn=None, logits_processor=logits_processor, device=device, ) # 6. Expand inputs for multiple image generations per prompt. input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids=input_ids, attention_mask=attention_mask, expand_size=generation_config.num_return_sequences, **model_kwargs, ) # 7. Prepare input and model caches num_image_tokens = self.model.vision_model.config.num_image_tokens batch_size, seq_len = input_ids.shape input_tokens = input_ids.repeat(2, 1) # Double batch size for conditional/unconditional logits attention_mask = model_kwargs.pop("attention_mask", None) attention_mask = attention_mask.repeat(2, 1) model_kwargs["attention_mask"] = attention_mask # Mask all the tokens that are neither BOS nor BOI with pad token in the unconditional logits. mask = (input_tokens[batch_size:, :] != generation_config.bos_token_id) & ( input_tokens[batch_size:, :] != generation_config.generation_kwargs["boi_token_id"] ) input_tokens[batch_size:, :].masked_fill_(mask, generation_config.pad_token_id) inputs_embeds = self.get_input_embeddings()(input_tokens) model_kwargs = self._get_initial_cache_position(seq_len, device, model_kwargs) if model_kwargs.get("past_key_values", None) is None: # Prepare cache if not provided. model_kwargs["past_key_values"] = self._get_cache( cache_implementation=generation_config.cache_implementation or "static", # batch_size should account for both conditional/unconditional input; hence multiplied by 2. batch_size=batch_size * 2, # we should have at least a cache len of seq_len + num_image_tokens. max_cache_len=max(generation_config.max_length, num_image_tokens + seq_len), model_kwargs=model_kwargs, ) # Placeholder for generated tokens. generated_tokens = torch.zeros((batch_size, num_image_tokens), dtype=dtype, device=device) # 8. 
init attention / hidden states / scores tuples
        output_attentions = generation_config.output_attentions
        output_hidden_states = generation_config.output_hidden_states
        output_scores = generation_config.output_scores
        output_logits = generation_config.output_logits
        return_dict_in_generate = generation_config.return_dict_in_generate

        raw_scores = () if (return_dict_in_generate and output_scores) else None
        raw_logits = () if (return_dict_in_generate and output_logits) else None
        decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
        decoder_attentions = () if (return_dict_in_generate and output_attentions) else None

        for i in range(num_image_tokens):
            model_inputs = self.prepare_inputs_for_generation(
                inputs_embeds=inputs_embeds, input_ids=input_tokens, **model_kwargs
            )
            model_inputs["attention_mask"] = model_inputs["attention_mask"].to(inputs_embeds.device)
            model_inputs["cache_position"] = model_inputs["cache_position"].to(inputs_embeds.device)

            outputs = self.model.language_model(
                **model_inputs,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
            )

            # Update model_kwargs like cache_position for the next generation step.
            model_kwargs = self._update_model_kwargs_for_generation(outputs, model_kwargs)
            hidden_state = outputs.last_hidden_state[:, -1, :].clone()

            # Generate scores using the generation head (not the LM head defined above).
            scores = self.model.generation_head(hidden_state)
            next_token_scores = logits_processor(input_ids, scores)

            # Sample next token.
            if generation_config.do_sample:
                probs = torch.softmax(next_token_scores, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1).squeeze(-1)
            else:
                next_token = torch.argmax(next_token_scores, dim=-1)

            generated_tokens[:, i] = next_token

            # Prepare embeddings for the next step.
            next_token = torch.cat([next_token, next_token])
            next_token = next_token.unsqueeze(-1)
            inputs_embeds = self.prepare_embeddings_for_image_generation(next_token)

            if return_dict_in_generate:
                if output_scores:
                    raw_scores += (scores,)
                if output_logits:
                    raw_logits += (hidden_state.float(),)
                if output_attentions:
                    decoder_attentions += outputs.attentions
                if output_hidden_states:
                    decoder_hidden_states += outputs.hidden_states

        if return_dict_in_generate:
            return GenerateDecoderOnlyOutput(
                sequences=generated_tokens,
                # Return the accumulated per-step scores rather than only the last step's.
                scores=raw_scores,
                logits=raw_logits,
                attentions=decoder_attentions,
                hidden_states=decoder_hidden_states,
                past_key_values=outputs.past_key_values,
            )
        else:
            return generated_tokens


class JanusImageProcessor(BlipImageProcessor):
    r"""
    Constructs a Janus image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        min_size (`int`, *optional*, defaults to 14):
            The minimum allowed size for the resized image. Ensures that neither the height nor width falls below
            this value after resizing.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can
            be overridden by the `resample` parameter in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
            Can be overridden by the `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        min_size: int = 14,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.min_size = min_size

        if image_mean is None:
            self.background_color = (127, 127, 127)
        else:
            self.background_color = tuple(int(x * 255) for x in image_mean)

    def pad_to_square(
        self,
        image: np.ndarray,
        background_color: Union[int, tuple[int, int, int]] = 0,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.array:
        """
        Pads an image to a square based on the longest edge.

        Args:
            image (`np.ndarray`):
                The image to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single-channel images or a tuple of integers
                for multi-channel images. If passed as an integer for a multi-channel image, it will default to `0`
                in subsequent channels.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                If unset, will use same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The padded image.
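
        Example (illustrative, derived from the padding logic below): a `(3, 100, 200)`
        channels-first input with `background_color=(127, 127, 127)` comes back as
        `(3, 200, 200)`, with the original pixels centered along the padded height axis.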
""" height, width = get_image_size(image, input_data_format) num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1] if height == width: image = ( to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image ) return image max_dim = max(height, width) # Ensure background_color is the correct shape if isinstance(background_color, int): background_color = [background_color] elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) if input_data_format == ChannelDimension.FIRST: result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype) for i, color in enumerate(background_color): result[i, :, :] = color if width > height: start = (max_dim - height) // 2 result[:, start : start + height, :] = image else: start = (max_dim - width) // 2 result[:, :, start : start + width] = image else: result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype) for i, color in enumerate(background_color): result[:, :, i] = color if width > height: start = (max_dim - height) // 2 result[start : start + height, :, :] = image else: start = (max_dim - width) // 2 result[:, start : start + width, :] = image return result def resize( self, image: np.ndarray, size: Union[dict[str, int], int], background_color: Optional[tuple[int, int, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to dynamically calculated size. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]` or `int`): The size to resize the image to. If a dictionary, it should have the keys `"height"` and `"width"`. background_color (`tuple[int, int, int]`): The background color to use for the padding. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `None`: will be inferred from input input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ background_color = background_color if background_color is not None else self.background_color if input_data_format is None: input_data_format = infer_channel_dimension_format(image) height, width = get_image_size(image, input_data_format) max_size = max(height, width) size = get_size_dict(size, default_to_square=True) if size["height"] != size["width"]: raise ValueError( f"Output height and width must be the same. 
Got height={size['height']} and width={size['width']}" ) size = size["height"] delta = size / max_size # Largest side becomes `size` and the other side is scaled according to the aspect ratio. output_size_nonpadded = [ max(int(height * delta), self.min_size), max(int(width * delta), self.min_size), ] image = resize( image, size=output_size_nonpadded, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) # Expand and pad the images to obtain a square image of dimensions `size x size` image = self.pad_to_square( image=image, background_color=background_color, input_data_format=input_data_format, ) return image def postprocess( self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[list[float]] = None, image_std: Optional[list[float]] = None, input_data_format: Optional[str] = None, return_tensors: Optional[str] = None, ): """Applies post-processing to the decoded image tokens by reversing transformations applied during preprocessing.""" do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) # Ensures input is a list if isinstance(images[0], PIL.Image.Image): return images if len(images) > 1 else images[0] if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) # Determine format dynamically pixel_values = [] for image in images: image = to_numpy_array(image) # Ensure NumPy format if do_normalize: image = self.unnormalize( image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format ) if do_rescale: image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) image = image.clip(0, 255).astype(np.uint8) if do_normalize and do_rescale and return_tensors == "PIL.Image.Image": image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format) image = PIL.Image.fromarray(image) pixel_values.append(image) data = {"pixel_values": pixel_values} return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None return BatchFeature(data=data, tensor_type=return_tensors) def unnormalize( self, image: np.array, image_mean: Union[float, Iterable[float]], image_std: Union[float, Iterable[float]], input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image * image_std) + image_mean Args: image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`): Batch of pixel values to postprocess. image_mean (`float` or `Iterable[float]`): The mean to use for unnormalization. image_std (`float` or `Iterable[float]`): The standard deviation to use for unnormalization. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ num_channels = 3 if isinstance(image_mean, Iterable): if len(image_mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}") else: image_mean = [image_mean] * num_channels if isinstance(image_std, Iterable): if len(image_std) != num_channels: raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}") else: image_std = [image_std] * num_channels rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std)) rev_image_std = tuple(1 / std for std in image_std) image = self.normalize( image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format ) return image __all__ = [ "JanusImageProcessor", "JanusPreTrainedModel", "JanusForConditionalGeneration", "JanusModel", "JanusVQVAE", "JanusVisionModel", "JanusVQVAEConfig", "JanusVisionConfig", "JanusConfig", ]
# transformers/src/transformers/models/janus/modular_janus.py
# coding=utf-8
# Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Kosmos2_5.
"""

from typing import Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...tokenization_utils_base import TextInput
from ...utils import is_torch_available


if is_torch_available():
    import torch


class Kosmos2_5ImagesKwargs(ImagesKwargs, total=False):
    max_patches: Optional[int]
    num_image_tokens: Optional[int]


class Kosmos2_5ProcessorKwargs(ProcessingKwargs, total=False):
    text_kwargs: TextKwargs
    images_kwargs: Kosmos2_5ImagesKwargs
    _defaults = {
        "text_kwargs": {
            "padding": True,
            "return_token_type_ids": False,
            "stride": 0,
            "truncation": True,
        },
        "images_kwargs": {
            "max_patches": 4096,
            "num_image_tokens": 2048,
        },
        "common_kwargs": {"return_tensors": "pt"},
    }


class Kosmos2_5Processor(ProcessorMixin):
    r"""
    Constructs a Kosmos2_5 processor which wraps a PreTrainedTokenizerFast and Kosmos2_5 image processor into a
    single processor.

    [`Kosmos2_5Processor`] offers all the functionalities of [`Kosmos2_5ImageProcessor`] and
    [`PreTrainedTokenizerFast`]. See the docstring of [`~Kosmos2_5Processor.__call__`] and
    [`~Kosmos2_5Processor.decode`] for more information.

    Args:
        image_processor (`Kosmos2_5ImageProcessor`):
            An instance of [`Kosmos2_5ImageProcessor`]. The image processor is a required input.
        tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]):
            An instance of [`T5TokenizerFast`] or [`T5Tokenizer`]. The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "PreTrainedTokenizerFast"

    def __init__(self, image_processor, tokenizer):
        self.image_start_token = tokenizer.boi_token  # "<image>": fixed token for the start of an image
        self.image_end_token = tokenizer.eoi_token  # "</image>": fixed token for the end of an image
        # "<s>": within an <image> ... </image> pair, these tokens mark positions reserved for an image
        self.image_token = tokenizer.image_token
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, list[TextInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[Kosmos2_5ProcessorKwargs],
    ) -> BatchFeature:
        """
        This method uses the [`Kosmos2_5ImageProcessor.preprocess`] method to prepare image(s) for the model, and
        [`PreTrainedTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information. The rest of this documentation
        shows the arguments specific to `Kosmos2_5Processor`.
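
        Example (an illustrative sketch; the checkpoint name and the `"<ocr>"` task prompt are assumptions,
        while the returned keys follow the code below):

        ```python
        >>> from PIL import Image
        >>> from transformers import AutoProcessor

        >>> processor = AutoProcessor.from_pretrained("microsoft/kosmos-2.5")
        >>> image = Image.open("document.png")
        >>> inputs = processor(images=image, text="<ocr>", return_tensors="pt")
        >>> # `inputs` bundles the image processor's patch features together with the
        >>> # `input_ids`, `attention_mask` and `image_embeds_position_mask` built below.
        ```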
""" if images is None and text is None: raise ValueError("You have to specify either images or text.") if images is None: raise ValueError("Kosmos2_5Processor requires images to be passed.") output_kwargs = self._merge_kwargs( Kosmos2_5ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) num_image_tokens = output_kwargs["images_kwargs"].setdefault("num_image_tokens", None) encoding = BatchFeature() if images is not None: image_encoding = self.image_processor(images, **output_kwargs["images_kwargs"]) image_encoding.pop("rows") image_encoding.pop("cols") encoding.update(image_encoding) prompt = f"{self.tokenizer.bos_token}{self.image_start_token}{self.image_token * num_image_tokens}{self.image_end_token}" if text is not None: if isinstance(text, str): text = [prompt + text] else: text = [prompt + t for t in text] input = self.tokenizer(text, **output_kwargs["text_kwargs"]) batch_size, seq_len = input.input_ids.shape image_embeds_position_mask = [0, -1] + [1] * num_image_tokens + [-1] image_embeds_position_mask += [0] * (seq_len - len(image_embeds_position_mask)) image_embeds_position_mask = ( torch.LongTensor(image_embeds_position_mask).unsqueeze(0).repeat(batch_size, 1) ) encoding.update( { "input_ids": input.input_ids, "attention_mask": input.attention_mask, "image_embeds_position_mask": image_embeds_position_mask, } ) return encoding def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to Kosmos2_5TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to Kosmos2_5TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) __all__ = ["Kosmos2_5Processor"]
# transformers/src/transformers/models/kosmos2_5/processing_kosmos2_5.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv3. It overwrites two methods of the slow tokenizer class, namely
_batch_encode_plus and _encode_plus, in which the Rust tokenizer is used.
"""

import json
from typing import Optional, Union

from tokenizers import processors

from ...tokenization_utils_base import (
    BatchEncoding,
    EncodedInput,
    PaddingStrategy,
    PreTokenizedInput,
    TensorType,
    TextInput,
    TextInputPair,
    TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import add_end_docstrings, logging
from .tokenization_layoutlmv3 import (
    LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING,
    LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
    LayoutLMv3Tokenizer,
)


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}


class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word. (The RoBERTa tokenizer detects the beginning of words by the preceding space.)
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
        cls_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [CLS] token.
        sep_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [SEP] token.
        pad_token_box (`list[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [PAD] token.
        pad_token_label (`int`, *optional*, defaults to -100):
            The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
            CrossEntropyLoss.
        only_label_first_subword (`bool`, *optional*, defaults to `True`):
            Whether or not to only label the first subword, in case word labels are provided.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = LayoutLMv3Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=True,
        trim_offsets=True,
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[0, 0, 0, 0],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            **kwargs,
        )

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

        # additional properties
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.__call__ def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, list[PreTokenizedInput]]] = None, boxes: Optional[Union[list[list[int]], list[list[list[int]]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). """ # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must be of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." 
) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." ) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.batch_encode_plus def batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[Union[list[int], list[list[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, 
max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.tokenize def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]: batched_input = [(text, pair)] if pair else [text] encodings = self._tokenizer.encode_batch( batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs ) return encodings[0].tokens @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.encode_plus def encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, `__call__` should be used instead. Args: text (`str`, `List[str]`, `List[List[str]]`): The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. text_pair (`List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a list of list of strings (words of a batch of examples). 
""" # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs, ) return self._encode_plus( text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ list[TextInput], list[TextInputPair], list[PreTokenizedInput], ], is_pair: Optional[bool] = None, boxes: Optional[list[list[list[int]]]] = None, word_labels: Optional[list[list[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv3 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: tuple[ # list[dict[str, list[list[int]]]] or list[dict[str, 2D-Tensor]], # list[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the 
example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0]: stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] previous_token_empty = False for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0 and not previous_token_empty: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) if offset == (0, 0): previous_token_empty = True else: previous_token_empty = False else: labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._encode_plus def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[list[list[int]]] = None, word_labels: Optional[list[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: 
TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._pad def _pad( self, encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side: The side on which the model should have padding applied. Should be selected between ['right', 'left']. 
Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side
            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding side: " + str(padding_side))

        return encoded_inputs

    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
    ) -> list[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`list[int]`):
                List of IDs.
            token_ids_1 (`list[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `list[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]


__all__ = ["LayoutLMv3TokenizerFast"]
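Read in sequence, the class above reduces to: pretokenized words plus per-word boxes in, subword ids plus per-token boxes (and optionally labels) out. A minimal usage sketch; the `microsoft/layoutlmv3-base` checkpoint and the toy words, boxes, and labels are illustrative, not part of this file:

```python
from transformers import LayoutLMv3TokenizerFast

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")

# One pretokenized example: words plus one 0-1000 normalized box per word.
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [0, 1]  # one token-classification label per word

encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, return_tensors="pt")

# `bbox` repeats each word's box for all of its subwords; with
# only_label_first_subword=True, `labels` keeps the word label on the first
# subword and pad_token_label (-100) everywhere else.
print(encoding["input_ids"].shape, encoding["bbox"].shape, encoding["labels"].shape)
```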
transformers/src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LeViT."""

from ...image_processing_utils_fast import BaseImageProcessorFast, SizeDict
from ...image_transforms import (
    ChannelDimension,
    get_resize_output_image_size,
)
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from ...utils import auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available


if is_torch_available():
    import torch

if is_torchvision_available():
    if is_torchvision_v2_available():
        from torchvision.transforms.v2 import functional as F
    else:
        from torchvision.transforms import functional as F


@auto_docstring
class LevitImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_DEFAULT_MEAN
    image_std = IMAGENET_DEFAULT_STD
    size = {"shortest_edge": 224}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = None

    def resize(
        self,
        image: torch.Tensor,
        size: SizeDict,
        interpolation: "F.InterpolationMode" = None,
        **kwargs,
    ) -> torch.Tensor:
        """
        Resize an image.

        If size is a dict with keys "width" and "height", the image will be resized to `(size["height"],
        size["width"])`.

        If size is a dict with key "shortest_edge", the shortest edge value `c` is rescaled to `int(c * (256/224))`.
        The smaller edge of the image will be matched to this value, i.e., if height > width, then the image will be
        rescaled to `(size["shortest_edge"] * height / width, size["shortest_edge"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Size of the output image after resizing. If size is a dict with keys "width" and "height", the image
                will be resized to (height, width). If size is a dict with key "shortest_edge", the shortest edge
                value `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this
                value, i.e., if height > width, then the image will be rescaled to (size * height / width, size).
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`):
                Resampling filter to use when resizing the image.
        """
        interpolation = interpolation if interpolation is not None else F.InterpolationMode.BICUBIC
        if size.shortest_edge:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            new_size = get_resize_output_image_size(
                image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST
            )
        elif size.height and size.width:
            new_size = (size.height, size.width)
        else:
            raise ValueError(f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size.keys()}.")
        return F.resize(
            image,
            size=new_size,
            interpolation=interpolation,
            **kwargs,
        )


__all__ = ["LevitImageProcessorFast"]
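As a quick sanity check of the `shortest_edge` rule above (224 is first rescaled to int(224 * 256/224) = 256, then the crop brings the output back to 224x224), here is a hedged usage sketch; loading via `AutoImageProcessor` with `use_fast=True` and the `facebook/levit-128S` checkpoint are assumptions about the surrounding library, not something this file defines:

```python
import torch
from transformers import AutoImageProcessor

# use_fast=True should select the torchvision-backed fast processor when available.
processor = AutoImageProcessor.from_pretrained("facebook/levit-128S", use_fast=True)

# A dummy 640x480 RGB image, channels-first.
image = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
inputs = processor(images=image, return_tensors="pt")

# The shorter side is resized to 256, then the result is center-cropped to the
# 224x224 crop_size declared on the class above.
print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])
```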
transformers/src/transformers/models/levit/image_processing_levit_fast.py
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LLaVa."""

from typing import Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
    BaseImageProcessorFast,
    DefaultFastImageProcessorKwargs,
    group_images_by_shape,
    reorder_images,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    SizeDict,
    get_image_size,
)
from ...processing_utils import Unpack
from ...utils import (
    TensorType,
    auto_docstring,
    is_torch_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    is_vision_available,
)


if is_vision_available():
    from ...image_utils import PILImageResampling

if is_torch_available():
    import torch

if is_torchvision_available():
    if is_torchvision_v2_available():
        from torchvision.transforms.v2 import functional as F
    else:
        from torchvision.transforms import functional as F


class LlavaFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """
    Args:
        do_pad (`bool`, *optional*):
            Whether to pad the image to a square based on the longest edge.
    """

    do_pad: Optional[bool]


@auto_docstring
class LlavaImageProcessorFast(BaseImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"shortest_edge": 224}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_pad = False
    do_resize = True
    do_center_crop = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    valid_kwargs = LlavaFastImageProcessorKwargs

    def __init__(self, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> None:
        super().__init__(**kwargs)

    @auto_docstring
    def preprocess(self, images: ImageInput, **kwargs: Unpack[LlavaFastImageProcessorKwargs]) -> BatchFeature:
        return super().preprocess(images, **kwargs)

    def pad_to_square(
        self,
        images: "torch.Tensor",
        background_color: Union[int, tuple[int, int, int]] = 0,
    ) -> "torch.Tensor":
        """
        Pads an image to a square based on the longest edge.

        Args:
            images (`torch.Tensor`):
                The images to pad.
            background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
                The color to use for the padding. Can be an integer for single-channel images or a tuple of integers
                for multi-channel images. If passed as an integer in multi-channel mode, it will default to `0` in
                subsequent channels.

        Returns:
            `torch.Tensor`: The padded images.
""" height, width = get_image_size(images, ChannelDimension.FIRST) if height == width: return images num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0] if isinstance(background_color, int): background_color = [background_color] + [0] * (num_channels - 1) elif len(background_color) != num_channels: raise ValueError( f"background_color must have no more than {num_channels} elements to match the number of channels" ) max_dim = max(height, width) paste_x_left = (max_dim - width) // 2 paste_y_left = (max_dim - height) // 2 paste_x_right = max_dim - width - paste_x_left paste_y_right = max_dim - height - paste_y_left padded_images = F.pad( images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color ) return padded_images def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_pad: bool, do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_pad: stacked_images = self.pad_to_square( images=stacked_images, background_color=tuple(int(x * 255) for x in self.image_mean) ) resized_images_grouped[shape] = stacked_images padded_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for batched resizing # Needed in case do_pad is False, or padding returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(padded_images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_center_crop: stacked_images = self.center_crop(stacked_images, crop_size) # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["LlavaImageProcessorFast"]
transformers/src/transformers/models/llava/image_processing_llava_fast.py
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for LLaVa-NeXT-Video. """ from typing import Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import select_best_resolution from ...image_utils import ImageInput, get_image_size, to_numpy_array from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...utils import logging from ...video_utils import VideoInput logger = logging.get_logger(__name__) class LlavaNextVideoProcessorKwargs(ProcessingKwargs, total=False): # see processing_utils.ProcessingKwargs documentation for usage. _defaults = { "text_kwargs": { "padding": False, }, "common_kwargs": { "return_tensors": "pt", }, } class LlavaNextVideoProcessor(ProcessorMixin): r""" Constructs a LLaVa-NeXT-Video processor which wraps a LLaVa-NeXT image processor, LLaVa-NeXT-Video video processor and a LLaMa tokenizer into a single processor. [`LlavaNextVideoProcessor`] offers all the functionalities of [`LlavaNextImageProcessor`], [`LlavaNextVideoImageProcessor`] and [`LlamaTokenizerFast`]. See the [`~LlavaNextVideoProcessor.__call__`] and [`~LlavaNextVideoProcessor.decode`] for more information. Args: video_processor ([`LlavaNextVideoVideoProcessor`], *optional*): The video processor is a required input. image_processor ([`LlavaNextImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`LlamaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): Jinja chat template that will be used in tokenizer's `apply_chat_template` patch_size (`int`, *optional*): Patch size from the vision tower. vision_feature_select_strategy (`str`, *optional*): The feature selection strategy used to select the vision feature from the vision backbone. Should be same as in model's config video_token (`str`, *optional*, defaults to `"<video>"`): Special token used to denote video location. image_token (`str`, *optional*, defaults to `"<image>"`): Special token used to denote image location. num_additional_image_tokens (`int`, *optional*, defaults to 0): Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other extra tokens appended, no need to set this arg. 
""" # video and image processor share same args, but have different processing logic # only image processor config is saved in the hub attributes = ["video_processor", "image_processor", "tokenizer"] image_processor_class = ("LlavaNextImageProcessor", "LlavaNextImageProcessorFast") video_processor_class = "AutoVideoProcessor" tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast") def __init__( self, video_processor=None, image_processor=None, tokenizer=None, chat_template=None, patch_size=None, vision_feature_select_strategy=None, video_token="<video>", image_token="<image>", num_additional_image_tokens=0, **kwargs, ): self.patch_size = patch_size self.num_additional_image_tokens = num_additional_image_tokens self.vision_feature_select_strategy = vision_feature_select_strategy self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token self.video_token = tokenizer.video_token if hasattr(tokenizer, "video_token") else video_token self.image_token_id = ( tokenizer.image_token_id if getattr(tokenizer, "image_token_id", None) else tokenizer.convert_tokens_to_ids(self.image_token) ) self.video_token_id = ( tokenizer.video_token_id if getattr(tokenizer, "video_token_id", None) else tokenizer.convert_tokens_to_ids(self.video_token) ) super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template) def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos: VideoInput = None, **kwargs: Unpack[LlavaNextVideoProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to LlavaNextImageProcessor's [`~LlavaNextImageProcessor.__call__`] if `images` is not `None`. To prepare the video(s), this method forwards the `videos` and `kwrags` arguments to LlavaNextVideoImageProcessor's [`~LlavaNextVideoImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. 
Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( LlavaNextVideoProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if images is not None: image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) else: image_inputs = {} if videos is not None: videos_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"]) else: videos_inputs = {} if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise TypeError("Invalid input text. Please provide a string, or a list of strings") if image_inputs: image_sizes = iter(image_inputs["image_sizes"]) height, width = get_image_size(to_numpy_array(image_inputs["pixel_values"][0][0])) prompt_strings = [] for sample in text: while self.image_token in sample: image_size = next(image_sizes) if not isinstance(image_size, (list, tuple)): # cast to list to avoid numerical precision errors when calculating unpadding image_size = image_size.tolist() orig_height, orig_width = image_size num_image_tokens = self._get_number_of_features(orig_height, orig_width, height, width) if self.vision_feature_select_strategy == "default": num_image_tokens -= 1 sample = sample.replace(self.image_token, "<placeholder>" * num_image_tokens, 1) prompt_strings.append(sample) text = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings] # videos are easier, simply get frames and multiply if videos_inputs: one_video = videos_inputs.get("pixel_values_videos")[0] if isinstance(one_video, (list, tuple)): one_video = np.array(one_video) else: one_video = to_numpy_array(one_video) height, width = get_image_size(one_video[0]) num_frames = one_video.shape[0] # frame dim is always after batch dim # no `self.num_additional_image_tokens` added because video always has a default feature selection strategy num_image_tokens = (height // self.patch_size) * (width // self.patch_size) num_video_tokens = num_image_tokens // 4 * num_frames # divide by 4 needed for avg pooling layer prompt_strings = [] for sample in text: sample = sample.replace(self.video_token, self.video_token * num_video_tokens) prompt_strings.append(sample) text = prompt_strings return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"]) return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors) # Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_number_of_features def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int: image_grid_pinpoints = self.image_processor.image_grid_pinpoints height_best_resolution, width_best_resolution = select_best_resolution( [orig_height, orig_width], image_grid_pinpoints ) scale_height, scale_width = height_best_resolution // height, width_best_resolution // width patches_height = height // self.patch_size patches_width = 
width // self.patch_size unpadded_features, newline_features = self._get_unpadded_features( orig_height, orig_width, patches_height, patches_width, scale_height, scale_width ) # The base patch covers the entire image (+1 for the CLS) base_features = patches_height * patches_width + self.num_additional_image_tokens num_image_tokens = unpadded_features + newline_features + base_features return num_image_tokens # Copied from transformers.models.llava_next.processing_llava_next.LlavaNextProcessor._get_unpadded_features def _get_unpadded_features(self, height, width, patches_height, patches_width, scale_height, scale_width): """ Get number of features for a given image with height/width. LLaVA-NeXT is different from LLaVA because it divided each image into patches depending on its resolution. Therefore we need to calculate how many patches an image is divided into and get the number of features from that. """ current_height = patches_height * scale_height current_width = patches_width * scale_width original_aspect_ratio = width / height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: new_height = int(round(height * (current_width / width), 7)) padding = (current_height - new_height) // 2 current_height -= padding * 2 else: new_width = int(round(width * (current_height / height), 7)) padding = (current_width - new_width) // 2 current_width -= padding * 2 unpadded_features = current_height * current_width newline_features = current_height return (unpadded_features, newline_features) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: images_kwargs = LlavaNextVideoProcessorKwargs._defaults.get("images_kwargs", {}) images_kwargs.update(kwargs) size = images_kwargs.get("size", None) or self.image_processor.size size = ( (size["shortest_edge"], size["shortest_edge"]) if "shortest_edge" in size else (min(size["height"], size["width"]), min(size["height"], size["width"])) ) processed_height, processed_width = size batch_num_image_tokens = [] num_image_patches = [1] * len(image_sizes) # llava-next doesn't batch pixels as Idefics, thus `1` patch` for image_size in image_sizes: orig_height, orig_width = image_size num_image_tokens = self._get_number_of_features( orig_height, orig_width, processed_height, processed_width ) if self.vision_feature_select_strategy == "default": num_image_tokens -= 1 batch_num_image_tokens.append(num_image_tokens) vision_data.update({"num_image_tokens": batch_num_image_tokens, "num_image_patches": num_image_patches}) return MultiModalData(**vision_data) __all__ = ["LlavaNextVideoProcessor"]
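A hedged end-to-end sketch of the processor above; `llava-hf/LLaVA-NeXT-Video-7B-hf` is the reference checkpoint for this model family, and the dummy video shape and prompt format are illustrative assumptions:

```python
import numpy as np
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")

# 8 frames of a 336x336 RGB video, channels-last.
video = np.random.randint(0, 256, (8, 336, 336, 3), dtype=np.uint8)
prompt = "USER: <video>\nWhat is happening in this video? ASSISTANT:"

inputs = processor(text=prompt, videos=video, return_tensors="pt")

# The single <video> token is expanded to num_frames * (num_patches // 4)
# placeholder tokens before tokenization (the // 4 matches the average pooling
# layer described in __call__ above).
print(inputs["input_ids"].shape, inputs["pixel_values_videos"].shape)
```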
transformers/src/transformers/models/llava_next_video/processing_llava_next_video.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
# Lxmert Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 LXMERT model."""

from __future__ import annotations

import warnings
from dataclasses import dataclass

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_utils import (
    TFModelInputType,
    TFPreTrainedModel,
    get_initializer,
    keras,
    keras_serializable,
    shape_list,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, stable_softmax
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_lxmert import LxmertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
_CONFIG_FOR_DOC = "LxmertConfig"


@dataclass
class TFLxmertModelOutput(ModelOutput):
    """
    Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the
    language, visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the
    "relation-ship" encoder)

    Args:
        language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the language encoder.
        vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the visual encoder.
        pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification, CLS, token) further
            processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from
            the next sentence prediction (classification) objective during pretraining.
        language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
        vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
        language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average
            in the self-attention heads.
vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: tf.Tensor | None = None vision_output: tf.Tensor | None = None pooled_output: tf.Tensor | None = None language_hidden_states: tuple[tf.Tensor] | None = None vision_hidden_states: tuple[tf.Tensor] | None = None language_attentions: tuple[tf.Tensor] | None = None vision_attentions: tuple[tf.Tensor] | None = None cross_encoder_attentions: tuple[tf.Tensor] | None = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: tf.Tensor | None = None prediction_logits: tf.Tensor | None = None cross_relationship_score: tf.Tensor | None = None question_answering_score: tf.Tensor | None = None language_hidden_states: tuple[tf.Tensor] | None = None vision_hidden_states: tuple[tf.Tensor] | None = None language_attentions: tuple[tf.Tensor] | None = None vision_attentions: tuple[tf.Tensor] | None = None cross_encoder_attentions: tuple[tf.Tensor] | None = None class TFLxmertVisualFeatureEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm") # Box position encoding self.box_fc = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.feat_dim = config.visual_feat_dim self.pos_dim = config.visual_pos_dim self.config = config def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build([None, None, self.feat_dim]) if getattr(self, "visn_layer_norm", None) is not None: with tf.name_scope(self.visn_layer_norm.name): self.visn_layer_norm.build([None, None, self.config.hidden_size]) if getattr(self, "box_fc", None) is not None: with tf.name_scope(self.box_fc.name): self.box_fc.build([None, None, self.pos_dim]) if getattr(self, "box_layer_norm", None) is not None: with tf.name_scope(self.box_layer_norm.name): self.box_layer_norm.build([None, None, self.config.hidden_size]) class TFLxmertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], 
initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFLxmertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) self.ctx_dim = config.hidden_size self.config = config def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in the TFLxmertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.ctx_dim]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.ctx_dim]) class TFLxmertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFLxmertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertAttentionOutput(keras.layers.Layer): def __init__(self, config, **kwargs):
super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFLxmertSelfAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor) return (attention_output, attention_probs) if output_attentions else (attention_output,) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self", None) is not None: with tf.name_scope(self.self.name): self.self.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertCrossAttentionLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "att", None) is not None: with tf.name_scope(self.att.name): self.att.build(None) if getattr(self, "attention_output", None) is not None: with tf.name_scope(self.attention_output.name): self.attention_output.build(None) class TFLxmertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output = 
self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "transformer_output", None) is not None: with tf.name_scope(self.transformer_output.name): self.transformer_output.build(None) class TFLxmertXLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, 
visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visual_attention", None) is not None: with tf.name_scope(self.visual_attention.name): self.visual_attention.build(None) if getattr(self, "lang_self_att", None) is not None: with tf.name_scope(self.lang_self_att.name): self.lang_self_att.build(None) if getattr(self, "visn_self_att", None) is not None: with tf.name_scope(self.visn_self_att.name): self.visn_self_att.build(None) if getattr(self, "lang_inter", None) is not None: with tf.name_scope(self.lang_inter.name): self.lang_inter.build(None) if getattr(self, "lang_output", None) is not None: with tf.name_scope(self.lang_output.name): self.lang_output.build(None) if getattr(self, "visn_inter", None) is not None: with tf.name_scope(self.visn_inter.name): self.visn_inter.build(None) if getattr(self, "visn_output", None) is not None: with tf.name_scope(self.visn_output.name): self.visn_output.build(None) class TFLxmertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( 
vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "visn_fc", None) is not None: with tf.name_scope(self.visn_fc.name): self.visn_fc.build(None) if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "x_layers", None) is not None: for layer in self.x_layers: with tf.name_scope(layer.name): layer.build(None) if getattr(self, "r_layers", None) is not None: for layer in self.r_layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFLxmertMainLayer(keras.layers.Layer): config_class = LxmertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # This attention mask is simpler than the triangular masking of causal attention # used in OpenAI GPT; we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if visual_attention_mask is not None: extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) extended_visual_attention_mask = tf.multiply( tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst ) else: extended_visual_attention_mask = None # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions, training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) class TFLxmertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self): """ Dummy inputs to build the network.
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } @property def input_signature(self): return { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"), "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://huggingface.co/papers/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross-entropy loss for question answering attribute prediction, and object tag prediction. This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents visual features: ROI-pooled object features extracted from bounding boxes by a Faster R-CNN model. These are currently not provided by the transformers library. visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`): This input represents spatial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation).
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, visual_attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple | TFLxmertModelOutput: outputs = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) class TFLxmertPooler(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert class TFLxmertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config: LxmertConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert class TFLxmertLMPredictionHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.input_embeddings = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) def get_output_embeddings(self) -> keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert class TFLxmertMLMHead(keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) class TFLxmertPreTrainingHeads(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) self.config = config def call(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "predictions", None) is not None: with tf.name_scope(self.predictions.name): self.predictions.build(None) if getattr(self, "seq_relationship", None) is not None: with tf.name_scope(self.seq_relationship.name): self.seq_relationship.build([None, None, self.config.hidden_size]) class TFLxmertVisualAnswerHead(keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) self.hid_dim = hid_dim def call(self, hidden_states): 
hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.hid_dim]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, self.hid_dim * 2]) if getattr(self, "dense_1", None) is not None: with tf.name_scope(self.dense_1.name): self.dense_1.build([None, None, self.hid_dim * 2]) class TFLxmertVisualObjHead(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder_dict = { key: keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } self.config = config def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "transform", None) is not None: with tf.name_scope(self.transform.name): self.transform.build(None) if getattr(self, "decoder_dict", None) is not None: for layer in self.decoder_dict.values(): with tf.name_scope(layer.name): layer.build([None, None, self.config.hidden_size]) @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": 
config.num_attr_labels, "loss": "visn_ce", } if config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } def get_lm_head(self): return self.cls.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: TFModelInputType | None = None, visual_feats: tf.Tensor | None = None, visual_pos: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, visual_attention_mask: tf.Tensor | None = None, token_type_ids: tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, masked_lm_labels: tf.Tensor | None = None, obj_labels: dict[str, tuple[tf.Tensor, tf.Tensor]] | None = None, matched_label: tf.Tensor | None = None, ans: tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> tuple[tf.Tensor] | TFLxmertForPreTrainingOutput: r""" masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels (`dict[str, tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`): each key is named after each one of the visual losses and each element of the tuple is of the shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and the label scores, respectively matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing whether or not the text input matches the image (classification) loss.
Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. ans (`tf.Tensor` of shape `(batch_size,)`, *optional*, defaults to `None`): a one-hot representation of the correct answer Returns: """ lxmert_output = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs.
(Used 10 instead of 6 in EMNLP paper) # Now : (loss *1) for 12 epochs # # * 2 # Multiply by 2 because > half of the data will not have label total_loss += answer_loss losses += (answer_loss,) # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "lxmert", None) is not None: with tf.name_scope(self.lxmert.name): self.lxmert.build(None) if getattr(self, "cls", None) is not None: with tf.name_scope(self.cls.name): self.cls.build(None) if getattr(self, "obj_predict_head", None) is not None: with tf.name_scope(self.obj_predict_head.name): self.obj_predict_head.build(None) if getattr(self, "answer_head", None) is not None: with tf.name_scope(self.answer_head.name): self.answer_head.build(None) __all__ = [ "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", "TFLxmertPreTrainedModel", "TFLxmertVisualFeatureEncoder", ]
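For orientation, here is a minimal usage sketch of `TFLxmertModel` as defined above. The `unc-nlp/lxmert-base-uncased` checkpoint is the standard public LXMERT checkpoint; the random tensors below merely stand in for Faster R-CNN region features, which the transformers library does not compute for you (and `from_pt=True` may be needed if TF weights are unavailable for a given checkpoint):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFLxmertModel

tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("What is on the table?", return_tensors="tf")

# Placeholder visual inputs: in practice these come from an external
# Faster R-CNN detector (ROI-pooled features + normalized boxes in [0, 1]).
batch_size, num_visual_features = 1, 10
visual_feats = tf.random.uniform((batch_size, num_visual_features, model.config.visual_feat_dim))
visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))

outputs = model(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    visual_feats=visual_feats,
    visual_pos=visual_pos,
)
print(outputs.language_output.shape)  # (1, sequence_length, hidden_size)
print(outputs.vision_output.shape)    # (1, num_visual_features, hidden_size)
print(outputs.pooled_output.shape)    # (1, hidden_size)
```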
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mask2Former model configuration""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class Mask2FormerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Mask2FormerModel`]. It is used to instantiate a Mask2Former model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mask2Former [facebook/mask2former-swin-small-coco-instance](https://huggingface.co/facebook/mask2former-swin-small-coco-instance) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Currently, Mask2Former only supports the [Swin Transformer](swin) as backbone. Args: backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. If unset, the configuration corresponding to `swin-base-patch4-window12-384` will be used. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. feature_size (`int`, *optional*, defaults to 256): The features (channels) of the resulting feature maps. mask_feature_size (`int`, *optional*, defaults to 256): The masks' feature size; this value will also be used to specify the Feature Pyramid Network features' size. hidden_dim (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers. encoder_feedforward_dim (`int`, *optional*, defaults to 1024): Dimension of feedforward network for deformable detr encoder used as part of pixel decoder. activation_function (`str`, *optional*, defaults to `"relu"`): Activation function. encoder_layers (`int`, *optional*, defaults to 6): Number of layers in the deformable detr encoder used as part of pixel decoder. decoder_layers (`int`, *optional*, defaults to 10): Number of layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer. dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. dim_feedforward (`int`, *optional*, defaults to 2048): Feature dimension in feedforward network for transformer decoder. pre_norm (`bool`, *optional*, defaults to `False`): Whether to use pre-LayerNorm or not for transformer decoder. enforce_input_projection (`bool`, *optional*, defaults to `False`): Whether to add an input projection 1x1 convolution even if the input channels and hidden dim are identical in the Transformer decoder. common_stride (`int`, *optional*, defaults to 4): Parameter used for determining number of FPN levels used as part of pixel decoder. ignore_value (`int`, *optional*, defaults to 255): Category id to be ignored during training. num_queries (`int`, *optional*, defaults to 100): Number of queries for the decoder. no_object_weight (`float`, *optional*, defaults to 0.1): The weight to apply to the null (no object) class. class_weight (`float`, *optional*, defaults to 2.0): The weight for the cross entropy loss. mask_weight (`float`, *optional*, defaults to 5.0): The weight for the mask loss. dice_weight (`float`, *optional*, defaults to 5.0): The weight for the dice loss. train_num_points (`int`, *optional*, defaults to 12544): Number of points used for sampling during loss calculation. oversample_ratio (`float`, *optional*, defaults to 3.0): Oversampling parameter used for calculating the number of sampled points importance_sample_ratio (`float`, *optional*, defaults to 0.75): Ratio of points that are sampled via importance sampling. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1.0): The scaling factor used for the Xavier initialization gain in the HM Attention map module. use_auxiliary_loss (`bool`, *optional*, defaults to `True`): If `True`, [`Mask2FormerForUniversalSegmentationOutput`] will contain the auxiliary losses computed using the logits from each decoder's stage. feature_strides (`list[int]`, *optional*, defaults to `[4, 8, 16, 32]`): Feature strides corresponding to features generated from backbone network. output_auxiliary_logits (`bool`, *optional*): Should the model output its `auxiliary_logits` or not.
Examples: ```python >>> from transformers import Mask2FormerConfig, Mask2FormerModel >>> # Initializing a Mask2Former facebook/mask2former-swin-small-coco-instance configuration >>> configuration = Mask2FormerConfig() >>> # Initializing a model (with random weights) from the facebook/mask2former-swin-small-coco-instance style configuration >>> model = Mask2FormerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "mask2former" backbones_supported = ["swin"] attribute_map = {"hidden_size": "hidden_dim"} def __init__( self, backbone_config: Optional[dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: list[int] = [4, 8, 16, 32], output_auxiliary_logits: Optional[bool] = None, backbone: Optional[str] = None, use_pretrained_backbone: bool = False, use_timm_backbone: bool = False, backbone_kwargs: Optional[dict] = None, **kwargs, ): if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") backbone_config = CONFIG_MAPPING["swin"]( image_size=224, num_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) # verify that the backbone is supported if backbone_config is not None and backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
" f"Supported model types: {','.join(self.backbones_supported)}" ) self.backbone_config = backbone_config self.feature_size = feature_size self.mask_feature_size = mask_feature_size self.hidden_dim = hidden_dim self.encoder_feedforward_dim = encoder_feedforward_dim self.activation_function = activation_function self.encoder_layers = encoder_layers self.decoder_layers = decoder_layers self.num_attention_heads = num_attention_heads self.dropout = dropout self.dim_feedforward = dim_feedforward self.pre_norm = pre_norm self.enforce_input_projection = enforce_input_projection self.common_stride = common_stride self.ignore_value = ignore_value self.num_queries = num_queries self.no_object_weight = no_object_weight self.class_weight = class_weight self.mask_weight = mask_weight self.dice_weight = dice_weight self.train_num_points = train_num_points self.oversample_ratio = oversample_ratio self.importance_sample_ratio = importance_sample_ratio self.init_std = init_std self.init_xavier_std = init_xavier_std self.use_auxiliary_loss = use_auxiliary_loss self.feature_strides = feature_strides self.output_auxiliary_logits = output_auxiliary_logits self.num_hidden_layers = decoder_layers self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs super().__init__(**kwargs) @property def sub_configs(self): return ( {"backbone_config": type(self.backbone_config)} if getattr(self, "backbone_config", None) is not None else {} ) @classmethod def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs): """Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`Mask2FormerConfig`]: An instance of a configuration object """ return cls( backbone_config=backbone_config, **kwargs, ) __all__ = ["Mask2FormerConfig"]
transformers/src/transformers/models/mask2former/configuration_mask2former.py/0
{ "file_path": "transformers/src/transformers/models/mask2former/configuration_mask2former.py", "repo_id": "transformers", "token_count": 4893 }
489
# coding=utf-8 # Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MaskFormer Swin Transformer. The reason Swin Transformer is implemented here is because MaskFormer uses the hidden states before downsampling, which is different from the default Swin Transformer.""" import collections.abc import math from dataclasses import dataclass from typing import Optional import torch from torch import Tensor, nn from ...activations import ACT2FN from ...file_utils import ModelOutput from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import auto_docstring, torch_int from ...utils.backbone_utils import BackboneMixin from .configuration_maskformer_swin import MaskFormerSwinConfig @dataclass @auto_docstring( custom_intro=""" Class for MaskFormerSwinModel's outputs that also contains the spatial dimensions of the hidden states. """ ) class MaskFormerSwinModelOutputWithPooling(ModelOutput): r""" pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a mean pooling operation. hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*): A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the `forward` method. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Class for SwinEncoder's outputs. """ ) class MaskFormerSwinBaseModelOutput(ModelOutput): r""" hidden_states_spatial_dimensions (`tuple(tuple(int, int))`, *optional*): A tuple containing the spatial dimension of each `hidden_state` needed to reshape the `hidden_states` to `batch, channels, height, width`. Due to padding, their spatial size cannot be inferred before the `forward` method. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None hidden_states_spatial_dimensions: tuple[tuple[int, int]] = None attentions: Optional[tuple[torch.FloatTensor]] = None # Copied from transformers.models.swin.modeling_swin.window_partition def window_partition(input_feature, window_size): """ Partitions the given input into windows.
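An input of shape `(batch_size, height, width, num_channels)` is reshaped to `(num_windows * batch_size, window_size, window_size, num_channels)`; `height` and `width` are expected to be multiples of `window_size`.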
""" batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.window_reverse def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output class MaskFormerSwinEmbeddings(nn.Module): """ Construct the patch and position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = MaskFormerSwinPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values, interpolate_pos_encoding): _, num_channels, height, width = pixel_values.shape embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) if self.position_embeddings is not None: if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings with Swin->MaskFormerSwin class MaskFormerSwinPatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
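For example, with a patch size of 4, a `(batch_size, 3, 224, 224)` image becomes a sequence of `56 * 56` patch embeddings.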
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]: _, num_channels, height, width = pixel_values.shape # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging class MaskFormerSwinPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. 
""" def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input to be divisible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature # Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->MaskFormerSwin class MaskFormerSwinDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->MaskFormerSwin class MaskFormerSwinSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) ) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 
relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape hidden_shape = (batch_size, dim, -1, self.attention_head_size) query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in the MaskFormerSwinModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->MaskFormerSwin class MaskFormerSwinSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->MaskFormerSwin class MaskFormerSwinAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = MaskFormerSwinSelfAttention(config, dim, num_heads, window_size) self.output = MaskFormerSwinSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->MaskFormerSwin class MaskFormerSwinIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->MaskFormerSwin class MaskFormerSwinOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = 
self.dropout(hidden_states) return hidden_states class MaskFormerSwinLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0): super().__init__() self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = MaskFormerSwinAttention(config, dim, num_heads, self.window_size) self.drop_path = MaskFormerSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = MaskFormerSwinIntermediate(config, dim) self.output = MaskFormerSwinOutput(config, dim) def get_attn_mask(self, input_resolution): if self.shift_size > 0: # calculate attention mask for SW-MSA height, width = input_resolution img_mask = torch.zeros((1, height, width, 1)) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_left = pad_top = 0 pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, pad_left, pad_right, pad_top, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False): height, width = input_dimensions batch_size, dim, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask((height_pad, width_pad)) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) self_attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse( attention_windows, self.window_size, height_pad, width_pad ) # B height' width' C # reverse cyclic shift if
self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) outputs = (layer_output,) + outputs return outputs class MaskFormerSwinStage(GradientCheckpointingLayer): # Copied from transformers.models.swin.modeling_swin.SwinStage.__init__ with Swin->MaskFormerSwin def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList( [ MaskFormerSwinLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if (i % 2 == 0) else config.window_size // 2, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False ): all_hidden_states = () if output_hidden_states else None height, width = input_dimensions for i, block_module in enumerate(self.blocks): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None block_hidden_states = block_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) hidden_states = block_hidden_states[0] if output_hidden_states: all_hidden_states += (hidden_states,) if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states, input_dimensions) else: output_dimensions = (height, width, height, width) return hidden_states, output_dimensions, all_hidden_states class MaskFormerSwinEncoder(nn.Module): # Copied from transformers.models.swin.modeling_swin.SwinEncoder.__init__ with Swin->MaskFormerSwin def __init__(self, config, grid_size): super().__init__() self.num_layers = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")] self.layers = nn.ModuleList( [ MaskFormerSwinStage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=MaskFormerSwinPatchMerging if (i_layer < self.num_layers - 1) else None, ) for i_layer in range(self.num_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states, input_dimensions, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_input_dimensions = () all_self_attentions = () if output_attentions else None if output_hidden_states: all_hidden_states = 
all_hidden_states + (hidden_states,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None layer_hidden_states, output_dimensions, layer_all_hidden_states = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, output_hidden_states, ) input_dimensions = (output_dimensions[-2], output_dimensions[-1]) all_input_dimensions += (input_dimensions,) if output_hidden_states: all_hidden_states += (layer_all_hidden_states,) hidden_states = layer_hidden_states if output_attentions: all_self_attentions = all_self_attentions + (layer_all_hidden_states[1],) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return MaskFormerSwinBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, hidden_states_spatial_dimensions=all_input_dimensions, attentions=all_self_attentions, ) @auto_docstring class MaskFormerSwinPreTrainedModel(PreTrainedModel): config: MaskFormerSwinConfig base_model_prefix = "model" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["MaskFormerSwinStage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MaskFormerSwinEmbeddings): if module.position_embeddings is not None: module.position_embeddings.data.zero_() elif isinstance(module, MaskFormerSwinSelfAttention): module.relative_position_bias_table.data.zero_() class MaskFormerSwinModel(MaskFormerSwinPreTrainedModel): def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = MaskFormerSwinEmbeddings(config) self.encoder = MaskFormerSwinEncoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layers[layer].attention.prune_heads(heads) def forward( self, pixel_values=None, head_mask=None, output_attentions=None, output_hidden_states=None, interpolate_pos_encoding=False, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs.last_hidden_state if return_dict else encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] hidden_states_spatial_dimensions = (input_dimensions,) + encoder_outputs.hidden_states_spatial_dimensions return MaskFormerSwinModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, hidden_states_spatial_dimensions=hidden_states_spatial_dimensions, attentions=encoder_outputs.attentions, ) class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin): """ MaskFormerSwin backbone, designed especially for the MaskFormer framework. This class reshapes `hidden_states` from `(batch_size, sequence_length, hidden_size)` to `(batch_size, num_channels, height, width)`. It also adds additional layernorms after each stage. Args: config (`MaskFormerSwinConfig`): The configuration used by [`MaskFormerSwinModel`].
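Example (a minimal sketch: the weights are randomly initialized and the shapes assume the default configuration):

```python
>>> import torch
>>> from transformers import MaskFormerSwinConfig
>>> from transformers.models.maskformer.modeling_maskformer_swin import MaskFormerSwinBackbone

>>> config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
>>> backbone = MaskFormerSwinBackbone(config)
>>> feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
>>> [tuple(feature_map.shape) for feature_map in feature_maps]
[(1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)]
```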
""" def __init__(self, config: MaskFormerSwinConfig): super().__init__(config) super()._init_backbone(config) self.model = MaskFormerSwinModel(config) if "stem" in self.out_features: raise ValueError("This backbone does not support 'stem' in the `out_features`.") self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] self.hidden_states_norms = nn.ModuleList( [nn.LayerNorm(num_channels) for num_channels in self.num_features[1:]] ) # Initialize weights and apply final processing self.post_init() def forward( self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.model( pixel_values, output_hidden_states=True, output_attentions=output_attentions, return_dict=True ) # we skip the stem hidden_states = outputs.hidden_states[1:] # we need to reshape the hidden states to their original spatial dimensions # spatial dimensions contains all the heights and widths of each stage, including after the embeddings spatial_dimensions: tuple[tuple[int, int]] = outputs.hidden_states_spatial_dimensions feature_maps = () for i, (hidden_state, stage, (height, width)) in enumerate( zip(hidden_states, self.stage_names[1:], spatial_dimensions) ): norm = self.hidden_states_norms[i] # the last element correspond to the layer's last block output but before patch merging hidden_state_unpolled = hidden_state[-1] hidden_state_norm = norm(hidden_state_unpolled) # the pixel decoder (FPN) expects 3D tensors (features) batch_size, _, hidden_size = hidden_state_norm.shape # reshape "b (h w) d -> b d h w" hidden_state_permuted = ( hidden_state_norm.permute(0, 2, 1).view((batch_size, hidden_size, height, width)).contiguous() ) if stage in self.out_features: feature_maps += (hidden_state_permuted,) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) if output_attentions: output += (outputs.attentions,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = ["MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel"]
transformers/src/transformers/models/maskformer/modeling_maskformer_swin.py/0
{ "file_path": "transformers/src/transformers/models/maskformer/modeling_maskformer_swin.py", "repo_id": "transformers", "token_count": 16800 }
490
# coding=utf-8 # Copyright 2024 Kyutai, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Mimi model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, StaticCache from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_mimi import MimiConfig if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) @dataclass @auto_docstring class MimiOutput(ModelOutput): r""" audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discrete code embeddings computed using `model.encode`. audio_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Decoded audio values, obtained using the decoder part of Mimi. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). """ audio_codes: Optional[torch.LongTensor] = None audio_values: Optional[torch.FloatTensor] = None encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None class MimiConv1dPaddingCache: """ Padding cache for MimiConv1d causal convolutions in order to support streaming via cache padding.
See: https://arxiv.org/pdf/2005.06720 & https://arxiv.org/pdf/2204.07064 A padding cache is a list of cached partial hidden states for each convolution layer. Hidden states are cached from the previous call to the MimiConv1d forward pass, given the padding size. """ def __init__( self, num_layers: int, per_layer_padding: list[int], per_layer_padding_mode: list[str], per_layer_in_channels: list[int], ): # ensure correct number of layers for each arg from_args_num_layers = {len(per_layer_padding), len(per_layer_padding_mode), len(per_layer_in_channels)} if len(from_args_num_layers) != 1 or from_args_num_layers.pop() != num_layers: raise ValueError( f"Expected `num_layers` ({num_layers}) values in `per_layer_padding`, `per_layer_padding_mode` and `per_layer_in_channels`" ) elif not all(mode in ["constant", "replicate"] for mode in per_layer_padding_mode): raise NotImplementedError( "`padding_cache` is not supported for convolutions using padding modes other than `constant` or `replicate`" ) self.per_layer_padding = per_layer_padding self.per_layer_padding_mode = per_layer_padding_mode self.per_layer_in_channels = per_layer_in_channels self.per_layer_is_init = [True] * num_layers self.padding_cache = [None] * num_layers def update(self, hidden_states: torch.Tensor, layer_idx: int): """ Updates the padding cache with the new padding states for the layer `layer_idx` and returns the current cache. Parameters: hidden_states (`torch.Tensor`): The hidden states to be partially cached. layer_idx (`int`): The index of the layer to cache the states for. Returns: `torch.Tensor` or `None`, the current padding cache. """ batch_size, dtype, device = hidden_states.shape[0], hidden_states.dtype, hidden_states.device padding = self.per_layer_padding[layer_idx] padding_mode = self.per_layer_padding_mode[layer_idx] in_channels = self.per_layer_in_channels[layer_idx] if self.padding_cache[layer_idx] is None: if padding_mode == "constant": current_cache = torch.zeros( batch_size, in_channels, padding, device=device, dtype=dtype, ) elif padding_mode == "replicate": current_cache = ( torch.ones( batch_size, in_channels, padding, device=device, dtype=dtype, ) * hidden_states[..., :1] ) else: current_cache = self.padding_cache[layer_idx] # update the cache if padding > 0: padding_states = hidden_states[:, :, -padding:] else: padding_states = torch.empty(batch_size, in_channels, padding, dtype=dtype, device=device) self.padding_cache[layer_idx] = padding_states return current_cache @dataclass @auto_docstring class MimiEncoderOutput(ModelOutput): r""" audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discrete code embeddings computed using `model.encode`. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists of the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes` (those that don't have their past key value states given to this model). padding_cache (`MimiConv1dPaddingCache`, *optional*): Padding cache for MimiConv1d causal convolutions in order to support streaming via cache padding.
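When encoding audio chunk by chunk, the cache returned here is meant to be passed back to the subsequent `encode` call so that the causal convolutions see one continuous signal.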
""" audio_codes: Optional[torch.LongTensor] = None encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None padding_cache: Optional[MimiConv1dPaddingCache] = None @dataclass @auto_docstring class MimiDecoderOutput(ModelOutput): r""" audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*): Decoded audio values, obtained using the decoder part of Mimi. decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't have their past key value states given to this model). """ audio_values: Optional[torch.FloatTensor] = None decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None class MimiConv1d(nn.Module): """Conv1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1, groups: int = 1, pad_mode: Optional[str] = None, bias: bool = True, layer_idx: Optional[int] = None, ): super().__init__() self.causal = config.use_causal_conv self.pad_mode = config.pad_mode if pad_mode is None else pad_mode self.layer_idx = layer_idx self.in_channels = in_channels # warn user on unusual setup between dilation and stride if stride > 1 and dilation > 1: logger.warning( "MimiConv1d has been initialized with stride > 1 and dilation > 1" f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})." ) self.conv = nn.Conv1d( in_channels, out_channels, kernel_size, stride, dilation=dilation, groups=groups, bias=bias ) kernel_size = self.conv.kernel_size[0] stride = torch.tensor(self.conv.stride[0], dtype=torch.int64) dilation = self.conv.dilation[0] # Effective kernel size with dilations. 
kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64) self.register_buffer("stride", stride, persistent=False) self.register_buffer("kernel_size", kernel_size, persistent=False) self.register_buffer("padding_total", kernel_size - stride, persistent=False) # Asymmetric padding required for odd strides self.padding_right = self.padding_total // 2 self.padding_left = self.padding_total - self.padding_right def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv) # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._get_extra_padding_for_conv1d def _get_extra_padding_for_conv1d( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """See `pad_for_conv1d`.""" length = hidden_states.shape[-1] n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total return ideal_length - length @staticmethod # Copied from transformers.models.encodec.modeling_encodec.EncodecConv1d._pad1d def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str = "zero", value: float = 0.0): """Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input. If this is the case, we insert extra 0 padding to the right before the reflection happens. """ length = hidden_states.shape[-1] padding_left, padding_right = paddings if mode != "reflect": return nn.functional.pad(hidden_states, paddings, mode, value) max_pad = max(padding_left, padding_right) extra_pad = 0 if length <= max_pad: extra_pad = max_pad - length + 1 hidden_states = nn.functional.pad(hidden_states, (0, extra_pad)) padded = nn.functional.pad(hidden_states, paddings, mode, value) end = padded.shape[-1] - extra_pad return padded[..., :end] def _get_output_length(self, input_length: torch.LongTensor) -> torch.LongTensor: """ Return the length of the output of the MimiConv1d. 
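For example (illustrative values): with `kernel_size=4`, `stride=2`, `dilation=1` and causal padding, `padding_total = 2` and an input of length 10 needs no extra padding, so the convolution sees 12 samples and returns `(12 - 4) // 2 + 1 = 5` frames.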
""" # padding size n_frames = (input_length - self.kernel_size + self.padding_total) / self.stride + 1 n_frames = torch.ceil(n_frames).to(torch.int64) - 1 ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total extra_padding = ideal_length - input_length if self.causal: padding_left = self.padding_total padding_right = extra_padding else: padding_left = self.padding_left padding_right = self.padding_right + extra_padding # padding input_length = input_length + padding_left + padding_right # conv output_lenght = ( input_length + 2 * self.conv.padding[0] - self.conv.dilation[0] * (self.conv.kernel_size[0] - 1) - 1 ) // self.conv.stride[0] + 1 return output_lenght def forward(self, hidden_states, padding_cache=None): extra_padding = self._get_extra_padding_for_conv1d(hidden_states) if not self.causal and padding_cache is not None: raise ValueError("`padding_cache` is not supported for non-causal convolutions.") if self.causal and padding_cache is not None: layer_padding_cache = padding_cache.update(hidden_states, self.layer_idx) hidden_states = torch.cat([layer_padding_cache, hidden_states], dim=2) elif self.causal: # Left padding for causal hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode) else: hidden_states = self._pad1d( hidden_states, (self.padding_left, self.padding_right + extra_padding), mode=self.pad_mode ) hidden_states = self.conv(hidden_states) return hidden_states class MimiConvTranspose1d(nn.Module): """ConvTranspose1d with asymmetric or causal padding and normalization.""" def __init__( self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, bias=True, ): super().__init__() self.causal = config.use_causal_conv self.trim_right_ratio = config.trim_right_ratio self.conv = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, stride, groups=groups, bias=bias) if not (self.causal or self.trim_right_ratio == 1.0): raise ValueError("`trim_right_ratio` != 1.0 only makes sense for causal convolutions") kernel_size = self.conv.kernel_size[0] stride = self.conv.stride[0] padding_total = kernel_size - stride # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be # removed at the very end, when keeping only the right length for the output, # as removing it here would require also passing the length at the matching layer # in the encoder. if self.causal: # Trim the padding on the right according to the specified ratio # if trim_right_ratio = 1.0, trim everything from right self.padding_right = math.ceil(padding_total * self.trim_right_ratio) else: # Asymmetric padding required for odd strides self.padding_right = padding_total // 2 self.padding_left = padding_total - self.padding_right def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) # unpad end = hidden_states.shape[-1] - self.padding_right hidden_states = hidden_states[..., self.padding_left : end] return hidden_states class MimiResnetBlock(nn.Module): """ Residual block from SEANet model as used by Mimi. 
""" def __init__(self, config: MimiConfig, dim: int, dilations: list[int]): super().__init__() kernel_sizes = (config.residual_kernel_size, 1) if len(kernel_sizes) != len(dilations): raise ValueError("Number of kernel sizes should match number of dilations") hidden = dim // config.compress block = [] for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)): in_chs = dim if i == 0 else hidden out_chs = dim if i == len(kernel_sizes) - 1 else hidden block += [nn.ELU()] block += [MimiConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)] self.block = nn.ModuleList(block) if config.use_conv_shortcut: self.shortcut = MimiConv1d(config, dim, dim, kernel_size=1) else: self.shortcut = nn.Identity() def forward(self, hidden_states, padding_cache=None): residual = hidden_states for layer in self.block: if isinstance(layer, MimiConv1d): hidden_states = layer(hidden_states, padding_cache=padding_cache) else: hidden_states = layer(hidden_states) if isinstance(self.shortcut, MimiConv1d): residual = self.shortcut(residual, padding_cache=padding_cache) else: residual = self.shortcut(residual) return residual + hidden_states class MimiEncoder(nn.Module): """SEANet encoder as used by Mimi.""" def __init__(self, config: MimiConfig): super().__init__() model = [MimiConv1d(config, config.audio_channels, config.num_filters, config.kernel_size)] scaling = 1 # keep track of MimiConv1d submodule layer names for easy encoded length computation mimiconv1d_layer_names = ["layers.0"] # Downsample to raw audio scale for ratio in reversed(config.upsampling_ratios): current_scale = scaling * config.num_filters # Add residual layers for j in range(config.num_residual_layers): mimiconv1d_layer_names.extend([f"layers.{len(model)}.block.1", f"layers.{len(model)}.block.3"]) model += [MimiResnetBlock(config, current_scale, [config.dilation_growth_rate**j, 1])] # Add downsampling layers model += [nn.ELU()] mimiconv1d_layer_names.append(f"layers.{len(model)}") model += [MimiConv1d(config, current_scale, current_scale * 2, kernel_size=ratio * 2, stride=ratio)] scaling *= 2 model += [nn.ELU()] mimiconv1d_layer_names.append(f"layers.{len(model)}") model += [MimiConv1d(config, scaling * config.num_filters, config.hidden_size, config.last_kernel_size)] self.layers = nn.ModuleList(model) self._mimiconv1d_layer_names = mimiconv1d_layer_names # initialize layer_idx for MimiConv1d submodules, necessary for padding_cache for layer_idx, layername in enumerate(self._mimiconv1d_layer_names): conv_layer = self.get_submodule(layername) setattr(conv_layer, "layer_idx", layer_idx) def forward(self, hidden_states, padding_cache=None): for layer in self.layers: if isinstance(layer, (MimiConv1d, MimiResnetBlock)): hidden_states = layer(hidden_states, padding_cache=padding_cache) else: hidden_states = layer(hidden_states) return hidden_states class MimiLayerScale(nn.Module): """Layer scale from [Touvron et al 2021] (https://huggingface.co/papers/2103.17239). This rescales diagonally the residual outputs close to 0, with a learnt scale. 
""" def __init__(self, config): super().__init__() channels = config.hidden_size initial_scale = config.layer_scale_initial_scale self.scale = nn.Parameter(torch.full((channels,), initial_scale, requires_grad=True)) def forward(self, x: torch.Tensor): return self.scale * x # Copied from transformers.models.mistral.modeling_mistral.MistralRotaryEmbedding with Mistral->Mimi class MimiRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: MimiConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class MimiMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False) # Copied from transformers.models.clip.modeling_clip.CLIPMLP.forward def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) # copied from transformers.models.gemma.modeling_gemma.GemmaAttention with Gemma->Mimi # no longer copied after attention refactors class MimiAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: MimiConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.scaling = 1 / math.sqrt(config.head_dim) if self.hidden_size % self.num_heads != 0: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias) self.rotary_emb = MimiRotaryEmbedding(config) self.sliding_window = config.sliding_window # Ignore copy @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaFlashAttention2 with Gemma->Mimi # TODO cyril: modular class MimiFlashAttention2(MimiAttention): """ Mimi flash attention module. This module inherits from `MimiAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if isinstance(past_key_values, StaticCache): raise ValueError( "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" ) output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. 
(MimiRMSNorm handles it correctly) input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaSdpaAttention with Gemma->Mimi # TODO cyril: modular class MimiSdpaAttention(MimiAttention): """ Mimi attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from `MimiAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to SDPA API. """ # Adapted from MimiAttention.forward @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "MimiModel is using MimiSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and causal_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. 
is_causal = causal_mask is None and q_len > 1 attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None MIMI_ATTENTION_CLASSES = { "eager": MimiAttention, "flash_attention_2": MimiFlashAttention2, "sdpa": MimiSdpaAttention, } class MimiTransformerLayer(GradientCheckpointingLayer): def __init__(self, config: MimiConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = MIMI_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) self.mlp = MimiMLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps) self.self_attn_layer_scale = MimiLayerScale(config) self.mlp_layer_scale = MimiLayerScale(config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence kwargs (`dict`, *optional*): Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code into the model """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = residual + self.self_attn_layer_scale(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + self.mlp_layer_scale(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class MimiTransformerModel(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`MimiTransformerLayer`] Args: config: MimiConfig """ def __init__(self, config: MimiConfig): super().__init__() self.layers = nn.ModuleList( [MimiTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.config = config def forward( self, hidden_states: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Embedded representation that will be contextualized by the model attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. Two formats are allowed: - a [`~cache_utils.Cache`] instance; - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy cache format. The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the legacy cache format will be returned. If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=hidden_states, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) class MimiDecoder(nn.Module): """SEANet decoder as used by Mimi.""" def __init__(self, config: MimiConfig): super().__init__() scaling = int(2 ** len(config.upsampling_ratios)) model = [MimiConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)] # Upsample to raw audio scale for ratio in config.upsampling_ratios: current_scale = scaling * config.num_filters # Add upsampling layers model += [nn.ELU()] model += [ MimiConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio) ] # Add residual layers for j in range(config.num_residual_layers): model += [MimiResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))] scaling //= 2 # Add final layers model += [nn.ELU()] model += [MimiConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)] 
self.layers = nn.ModuleList(model) # Copied from transformers.models.encodec.modeling_encodec.EncodecDecoder.forward def forward(self, hidden_states): for layer in self.layers: hidden_states = layer(hidden_states) return hidden_states class MimiEuclideanCodebook(nn.Module): """Codebook with Euclidean distance.""" def __init__(self, config: MimiConfig, epsilon: float = 1e-5): super().__init__() embed = torch.zeros(config.codebook_size, config.codebook_dim) self.codebook_size = config.codebook_size self.register_buffer("initialized", torch.tensor([True], dtype=torch.float32)) self.register_buffer("cluster_usage", torch.ones(config.codebook_size)) self.register_buffer("embed_sum", embed) self._embed = None self.epsilon = epsilon @property def embed(self) -> torch.Tensor: if self._embed is None: self._embed = self.embed_sum / self.cluster_usage.clamp(min=self.epsilon)[:, None] return self._embed def quantize(self, hidden_states): # Projects each vector in `hidden_states` over the nearest centroid and return its index. # `hidden_states` should be `[N, D]` with `N` the number of input vectors and `D` the dimension. dists = torch.cdist(hidden_states[None].float(), self.embed[None].float(), p=2)[0] embed_ind = dists.argmin(dim=-1) return embed_ind # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.encode def encode(self, hidden_states): shape = hidden_states.shape # pre-process hidden_states = hidden_states.reshape((-1, shape[-1])) # quantize embed_ind = self.quantize(hidden_states) # post-process embed_ind = embed_ind.view(*shape[:-1]) return embed_ind # Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.decode def decode(self, embed_ind): quantize = nn.functional.embedding(embed_ind, self.embed) return quantize # Copied from transformers.models.encodec.modeling_encodec.EncodecVectorQuantization with Encodec->Mimi class MimiVectorQuantization(nn.Module): """ Vector quantization implementation. Currently supports only euclidean distance. """ def __init__(self, config: MimiConfig): super().__init__() self.codebook = MimiEuclideanCodebook(config) def encode(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1) embed_in = self.codebook.encode(hidden_states) return embed_in def decode(self, embed_ind): quantize = self.codebook.decode(embed_ind) quantize = quantize.permute(0, 2, 1) return quantize class MimiResidualVectorQuantizer(nn.Module): """Residual Vector Quantizer.""" def __init__(self, config: MimiConfig, num_quantizers: Optional[int] = None): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.num_quantizers = num_quantizers if num_quantizers is not None else config.num_quantizers self.layers = nn.ModuleList([MimiVectorQuantization(config) for _ in range(self.num_quantizers)]) self.input_proj = None self.output_proj = None if config.vector_quantization_hidden_dimension != config.hidden_size: self.input_proj = torch.nn.Conv1d( config.hidden_size, config.vector_quantization_hidden_dimension, 1, bias=False ) self.output_proj = torch.nn.Conv1d( config.vector_quantization_hidden_dimension, config.hidden_size, 1, bias=False ) def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[int] = None) -> torch.Tensor: """ Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer. 
""" if self.input_proj is not None: embeddings = self.input_proj(embeddings) num_quantizers = num_quantizers if num_quantizers is not None else self.num_quantizers residual = embeddings all_indices = [] for layer in self.layers[:num_quantizers]: indices = layer.encode(residual) quantized = layer.decode(indices) residual = residual - quantized all_indices.append(indices) out_indices = torch.stack(all_indices) return out_indices def decode(self, codes: torch.Tensor) -> torch.Tensor: """Decode the given codes of shape [B, K, T] to the quantized representation.""" quantized_out = torch.tensor(0.0, device=codes.device) codes = codes.transpose(0, 1) for i, indices in enumerate(codes): layer = self.layers[i] quantized = layer.decode(indices) quantized_out = quantized_out + quantized if self.output_proj is not None: quantized_out = self.output_proj(quantized_out) return quantized_out class MimiSplitResidualVectorQuantizer(nn.Module): """Split Residual Vector Quantizer.""" def __init__(self, config: MimiConfig): super().__init__() self.codebook_size = config.codebook_size self.frame_rate = config.frame_rate self.max_num_quantizers = config.num_quantizers self.num_semantic_quantizers = config.num_semantic_quantizers self.num_acoustic_quantizers = config.num_quantizers - config.num_semantic_quantizers self.semantic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_semantic_quantizers) self.acoustic_residual_vector_quantizer = MimiResidualVectorQuantizer(config, self.num_acoustic_quantizers) def encode(self, embeddings: torch.Tensor, num_quantizers: Optional[float] = None) -> torch.Tensor: """ Encode a given input tensor with the specified frame rate at the given number of quantizers / codebooks. The RVQ encode method sets the appropriate number of quantizers to use and returns indices for each quantizer. """ num_quantizers = self.max_num_quantizers if num_quantizers is None else num_quantizers if num_quantizers > self.max_num_quantizers: raise ValueError( f"The number of quantizers (i.e codebooks) asked should be lower than the total number of quantizers {self.max_num_quantizers}, but is currently {num_quantizers}." ) if num_quantizers < self.num_semantic_quantizers: raise ValueError( f"The number of quantizers (i.e codebooks) asked should be higher than the number of semantic quantizers {self.num_semantic_quantizers}, but is currently {num_quantizers}." ) # codes is [K, B, T], with T frames, K nb of codebooks. 
codes = self.semantic_residual_vector_quantizer.encode(embeddings) if num_quantizers > self.num_semantic_quantizers: acoustic_codes = self.acoustic_residual_vector_quantizer.encode( embeddings, num_quantizers=num_quantizers - self.num_semantic_quantizers ) codes = torch.cat([codes, acoustic_codes], dim=0) return codes def decode(self, codes: torch.Tensor) -> torch.Tensor: """Decode the given codes to the quantized representation.""" # The first num_semantic_quantizers codebooks are decoded using the semantic RVQ quantized_out = self.semantic_residual_vector_quantizer.decode(codes[:, : self.num_semantic_quantizers]) # The rest of the codebooks are decoded using the acoustic RVQ if codes.shape[1] > self.num_semantic_quantizers: quantized_out += self.acoustic_residual_vector_quantizer.decode(codes[:, self.num_semantic_quantizers :]) return quantized_out @auto_docstring class MimiPreTrainedModel(PreTrainedModel): config: MimiConfig base_model_prefix = "mimi" main_input_name = "input_values" supports_gradient_checkpointing = True _no_split_modules = ["MimiDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, MimiLayerScale): module.scale.data.fill_(self.config.layer_scale_initial_scale) @auto_docstring( custom_intro=""" The Mimi neural audio codec model. 
""" ) class MimiModel(MimiPreTrainedModel): def __init__(self, config: MimiConfig): super().__init__(config) self.config = config self.encoder = MimiEncoder(config) self.encoder_transformer = MimiTransformerModel(config) self.downsample = None self.upsample = None if config.frame_rate != config.encodec_frame_rate: self.downsample = MimiConv1d( config, config.hidden_size, config.hidden_size, kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate), stride=2, bias=False, pad_mode="replicate", layer_idx=len(self.encoder._mimiconv1d_layer_names), ) self.upsample = MimiConvTranspose1d( config, config.hidden_size, config.hidden_size, kernel_size=2 * int(config.encodec_frame_rate / config.frame_rate), stride=2, bias=False, groups=config.upsample_groups, ) self.decoder_transformer = MimiTransformerModel(config) self.decoder = MimiDecoder(config) self.quantizer = MimiSplitResidualVectorQuantizer(config) self.bits_per_codebook = int(math.log2(self.config.codebook_size)) if 2**self.bits_per_codebook != self.config.codebook_size: raise ValueError("The codebook_size must be a power of 2.") # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _encode_frame( self, input_values: torch.Tensor, num_quantizers: int, padding_mask: int, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, padding_cache: Optional[MimiConv1dPaddingCache] = None, return_dict: Optional[bool] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """ Encodes the given input using the underlying VQVAE. The padding mask is required to compute the correct scale. """ # TODO: @eustlb, let's make the encoder support padding_mask so that batched inputs are supported. embeddings = self.encoder(input_values, padding_cache=padding_cache) # TODO: @eustlb, convert the padding mask to attention mask. encoder_outputs = self.encoder_transformer( embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict ) if return_dict: past_key_values = encoder_outputs.get("past_key_values") elif len(encoder_outputs) > 1: past_key_values = encoder_outputs[1] embeddings = encoder_outputs[0].transpose(1, 2) embeddings = self.downsample(embeddings, padding_cache=padding_cache) codes = self.quantizer.encode(embeddings, num_quantizers) codes = codes.transpose(0, 1) return codes, past_key_values, padding_cache def get_encoded_length(self, input_length: torch.LongTensor) -> torch.LongTensor: """ Return the number of frames of the encoded audio waveform. """ output_length = input_length # encoder for layer_name in self.encoder._mimiconv1d_layer_names: output_length = self.encoder.get_submodule(layer_name)._get_output_length(output_length) # downsample output_length = self.downsample._get_output_length(output_length) return output_length def get_audio_codes_mask(self, padding_mask: torch.Tensor, padding_side: str = "right"): """ Get the mask for the audio codes from the original padding mask. 
""" encoded_lengths = self.get_encoded_length(padding_mask.sum(dim=-1)) audio_codes_mask = torch.arange(encoded_lengths.max(), device=encoded_lengths.device).expand( len(encoded_lengths), -1 ) audio_codes_mask = audio_codes_mask < encoded_lengths.unsqueeze(1) audio_codes_mask = audio_codes_mask.to(padding_mask.device) if padding_side == "right": return audio_codes_mask else: return audio_codes_mask.flip(dims=[-1]) def encode( self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, num_quantizers: Optional[float] = None, encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, padding_cache: Optional[MimiConv1dPaddingCache] = None, use_streaming: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, Optional[torch.Tensor]], MimiEncoderOutput]: """ Encodes the input audio waveform into discrete codes. Args: input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Float values of the input audio waveform. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 for *masked*. num_quantizers (`int`, *optional*): Number of quantizers (i.e codebooks) to use. By default, all quantizers are used. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't have their past key value states given to this model). return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: `codebook` of shape `[batch_size, num_codebooks, frames]`, the discrete encoded codes for the input audio waveform. """ return_dict = return_dict if return_dict is not None else self.config.return_dict use_streaming = use_streaming if use_streaming is not None else self.config.use_streaming num_quantizers = self.config.num_quantizers if num_quantizers is None else num_quantizers if num_quantizers > self.config.num_quantizers: raise ValueError( f"The number of quantizers (i.e codebooks) asked should be lower than the total number of quantizers {self.config.num_quantizers}, but is currently {num_quantizers}." 
) _, channels, input_length = input_values.shape if channels < 1 or channels > 2: raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}") if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() if use_streaming and padding_cache is None: per_layer_padding, per_layer_padding_mode, per_layer_in_channels = [], [], [] for layer_name in self.encoder._mimiconv1d_layer_names: per_layer_padding.append(self.encoder.get_submodule(layer_name).padding_total) per_layer_padding_mode.append(self.encoder.get_submodule(layer_name).pad_mode) per_layer_in_channels.append(self.encoder.get_submodule(layer_name).in_channels) # downsample layer per_layer_padding.append(self.downsample.padding_total) per_layer_padding_mode.append(self.downsample.pad_mode) per_layer_in_channels.append(self.downsample.in_channels) padding_cache = MimiConv1dPaddingCache( num_layers=len(self.encoder._mimiconv1d_layer_names) + 1, per_layer_padding=per_layer_padding, per_layer_padding_mode=per_layer_padding_mode, per_layer_in_channels=per_layer_in_channels, ) encoded_frames, encoder_past_key_values, padding_cache = self._encode_frame( input_values, num_quantizers, padding_mask.bool(), past_key_values=encoder_past_key_values, padding_cache=padding_cache, return_dict=return_dict, ) if not return_dict: return ( encoded_frames, encoder_past_key_values, padding_cache, ) return MimiEncoderOutput(encoded_frames, encoder_past_key_values, padding_cache) def _decode_frame( self, codes: torch.Tensor, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, return_dict: Optional[bool] = None, ) -> torch.Tensor: embeddings = self.quantizer.decode(codes) embeddings = self.upsample(embeddings) decoder_outputs = self.decoder_transformer( embeddings.transpose(1, 2), past_key_values=past_key_values, return_dict=return_dict ) if return_dict: past_key_values = decoder_outputs.get("past_key_values") elif len(decoder_outputs) > 1: past_key_values = decoder_outputs[1] embeddings = decoder_outputs[0].transpose(1, 2) outputs = self.decoder(embeddings) return outputs, past_key_values def decode( self, audio_codes: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, torch.Tensor], MimiDecoderOutput]: """ Decodes the given frames into an output audio waveform. Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be trimmed. Args: audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discret code embeddings computed using `model.encode`. padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`): Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 for *masked*. decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't have their past key value states given to this model). 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ return_dict = return_dict if return_dict is not None else self.config.return_dict audio_values, decoder_past_key_values = self._decode_frame( audio_codes, past_key_values=decoder_past_key_values, return_dict=return_dict ) # truncate based on padding mask if padding_mask is not None and padding_mask.shape[-1] < audio_values.shape[-1]: audio_values = audio_values[..., : padding_mask.shape[-1]] if not return_dict: return ( audio_values, decoder_past_key_values, ) return MimiDecoderOutput(audio_values, decoder_past_key_values) @auto_docstring def forward( self, input_values: torch.Tensor, padding_mask: Optional[torch.Tensor] = None, num_quantizers: Optional[int] = None, audio_codes: Optional[torch.Tensor] = None, encoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, decoder_past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, torch.Tensor], MimiOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, channels, sequence_length)`, *optional*): Raw audio input converted to Float. padding_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked* or 0 for *masked*. num_quantizers (`int`, *optional*): Number of quantizers (i.e codebooks) to use. By default, all quantizers are used. audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*): Discret code embeddings computed using `model.encode`. encoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't have their past key value states given to this model). decoder_past_key_values (`Cache`, *optional*): Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer. This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. The model will output the same cache format that is fed as input. If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't have their past key value states given to this model). 
Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoFeatureExtractor, MimiModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model_id = "kyutai/mimi" >>> model = MimiModel.from_pretrained(model_id) >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) >>> inputs = feature_extractor(raw_audio=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_codes = outputs.audio_codes >>> audio_values = outputs.audio_values ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict if padding_mask is None: padding_mask = torch.ones_like(input_values).bool() if audio_codes is None: encoder_outputs = self.encode( input_values, padding_mask, num_quantizers, encoder_past_key_values, return_dict=return_dict ) audio_codes = encoder_outputs[0] if return_dict: encoder_past_key_values = encoder_outputs.get("past_key_values") elif len(encoder_outputs) > 1: encoder_past_key_values = encoder_outputs[1] decoder_outputs = self.decode(audio_codes, padding_mask, decoder_past_key_values, return_dict=return_dict) audio_values = decoder_outputs[0] if return_dict: decoder_past_key_values = decoder_outputs.get("past_key_values") elif len(decoder_outputs) > 1: decoder_past_key_values = decoder_outputs[1] if not return_dict: return (audio_codes, audio_values, encoder_past_key_values, decoder_past_key_values) return MimiOutput( audio_codes=audio_codes, audio_values=audio_values, encoder_past_key_values=encoder_past_key_values, decoder_past_key_values=decoder_past_key_values, ) __all__ = ["MimiModel", "MimiPreTrainedModel"]
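The streaming path through `MimiModel.encode` threads two pieces of state across calls: a `MimiConv1dPaddingCache` that preserves convolutional left-context, and the encoder transformer's KV cache. Below is a minimal sketch of chunked encoding, assuming the `kyutai/mimi` checkpoint (24 kHz audio at a 12.5 Hz frame rate, i.e. 1920 samples per frame); the chunk size is illustrative, and the output field names follow the `encode` signature and the positional construction of `MimiEncoderOutput` above.

```python
import torch
from transformers import MimiModel

model = MimiModel.from_pretrained("kyutai/mimi").eval()

waveform = torch.randn(1, 1, 1920 * 50)  # (batch, channels, samples): ~4 s of placeholder audio
chunk = 1920 * 10                        # frame-aligned chunks, ~0.8 s per streaming step

codes, padding_cache, past_key_values = [], None, None
with torch.no_grad():
    for start in range(0, waveform.shape[-1], chunk):
        out = model.encode(
            waveform[..., start : start + chunk],
            use_streaming=True,                       # builds the padding cache on the first call
            padding_cache=padding_cache,              # conv left-context carried across chunks
            encoder_past_key_values=past_key_values,  # encoder transformer KV cache
            return_dict=True,
        )
        codes.append(out.audio_codes)
        padding_cache = out.padding_cache
        past_key_values = out.encoder_past_key_values

audio_codes = torch.cat(codes, dim=-1)  # (batch, num_quantizers, frames)
audio_values = model.decode(audio_codes, return_dict=True).audio_values
```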
transformers/src/transformers/models/mimi/modeling_mimi.py
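The residual loop inside `MimiResidualVectorQuantizer.encode` is easier to see in isolation: each codebook quantizes whatever error the previous stages left behind, and decoding simply sums the selected centroids. A self-contained toy version with random, untrained codebooks (all names here are illustrative, not part of the library):

```python
import torch

def toy_rvq_encode(x, codebooks):
    """x: (N, D) vectors; codebooks: list of (K, D) tensors. Returns (num_stages, N) indices."""
    residual, indices = x, []
    for embed in codebooks:
        dists = torch.cdist(residual, embed)  # (N, K) Euclidean distances, as in MimiEuclideanCodebook
        idx = dists.argmin(dim=-1)            # nearest centroid per vector
        residual = residual - embed[idx]      # the next stage only sees the leftover error
        indices.append(idx)
    return torch.stack(indices)

def toy_rvq_decode(indices, codebooks):
    """Sum the selected centroids of every stage, mirroring MimiResidualVectorQuantizer.decode."""
    return sum(embed[idx] for embed, idx in zip(codebooks, indices))

torch.manual_seed(0)
codebooks = [torch.randn(16, 8) for _ in range(4)]  # 4 stages, 16 codes each, dim 8
x = torch.randn(5, 8)
codes = toy_rvq_encode(x, codebooks)                # (4, 5): one code row per stage
x_hat = toy_rvq_decode(codes, codebooks)
print(codes.shape, (x - x_hat).norm() / x.norm())   # relative error; trained codebooks drive this down
```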
# coding=utf-8 # Copyright 2025 HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...processing_utils import Unpack from ...utils import logging from ..llava.modeling_llava import ( LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast, LlavaPreTrainedModel, TransformersKwargs, ) from ..mistral.modeling_mistral import MistralRMSNorm from .configuration_mistral3 import Mistral3Config logger = logging.get_logger(__name__) class Mistral3RMSNorm(MistralRMSNorm): pass class Mistral3PatchMerger(nn.Module): """ Learned merging of spatial_merge_size ** 2 patches """ def __init__(self, config: Mistral3Config): super().__init__() self.config = config hidden_size = config.vision_config.hidden_size self.spatial_merge_size = config.spatial_merge_size self.patch_size = self.config.vision_config.patch_size self.merging_layer = nn.Linear(hidden_size * self.spatial_merge_size**2, hidden_size, bias=False) def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor) -> torch.Tensor: image_sizes = [ (image_size[0] // self.patch_size, image_size[1] // self.patch_size) for image_size in image_sizes ] tokens_per_image = [h * w for h, w in image_sizes] d = image_features.shape[-1] permuted_tensor = [] for image_index, image_tokens in enumerate(image_features.split(tokens_per_image)): # Reshape image_tokens into a 2D grid h, w = image_sizes[image_index] image_grid = image_tokens.view(h, w, d).permute(2, 0, 1).unsqueeze(0) grid = torch.nn.functional.unfold( image_grid, kernel_size=self.spatial_merge_size, stride=self.spatial_merge_size ) grid = grid.view(d * self.spatial_merge_size**2, -1).t() permuted_tensor.append(grid) image_features = torch.cat(permuted_tensor, dim=0) image_features = self.merging_layer(image_features) return image_features class Mistral3MultiModalProjector(nn.Module): def __init__(self, config: Mistral3Config): super().__init__() self.norm = Mistral3RMSNorm(config.vision_config.hidden_size, eps=config.text_config.rms_norm_eps) self.patch_merger = Mistral3PatchMerger(config) # We have hidden_size * the number of vision feature layers num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) self.linear_1 = nn.Linear( config.vision_config.hidden_size * num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear( config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias ) def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor): image_features = self.norm(image_features) image_features = self.patch_merger(image_features, image_sizes) hidden_states = self.linear_1(image_features) hidden_states = 
self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class Mistral3CausalLMOutputWithPast(LlavaCausalLMOutputWithPast): pass class Mistral3ModelOutputWithPast(LlavaModelOutputWithPast): pass class Mistral3PreTrainedModel(LlavaPreTrainedModel): pass class Mistral3Model(LlavaModel): def get_image_features( self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, **kwargs, ): """ Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`): The tensors corresponding to the input images. vision_feature_layer (`Union[int, list[int]]`, *optional*): The index of the layer to select the vision feature. If multiple indices are provided, the vision feature of the corresponding indices will be concatenated to form the vision features. image_sizes (`torch.Tensor`, *optional*): Tensor containing the image sizes as returned by the processor. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). """ vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) kwargs = {k: v for k, v in kwargs.items() if v is not None} # this is not memory efficient at all (output_hidden_states=True) will save all the hidden states. image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs) # If we have one vision feature layer, return the corresponding hidden states, # otherwise, select the hidden states of each feature layer and concatenate them if isinstance(vision_feature_layer, int): selected_image_feature = image_outputs.hidden_states[vision_feature_layer] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes) downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size split_sizes = [(height // downsample_ratio) * (width // downsample_ratio) for height, width in image_sizes] image_features = torch.split(image_features.squeeze(0), split_sizes) return image_features def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layer: Optional[Union[int, list[int]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, image_sizes: torch.Tensor = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, Mistral3ModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layer = ( vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer ) if (input_ids is None) ^ (inputs_embeds is not None): raise 
ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features( pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, image_sizes=image_sizes, ) image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return Mistral3ModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) class Mistral3ForConditionalGeneration(LlavaForConditionalGeneration): def get_image_features( self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, list[int]]] = None, **kwargs, ): return self.model.get_image_features( pixel_values=pixel_values, image_sizes=image_sizes, vision_feature_layer=vision_feature_layer, **kwargs, ) def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, image_sizes: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Mistral3CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Mistral3ForConditionalGeneration >>> model = Mistral3ForConditionalGeneration.from_pretrained("mistralai/Mistral-Small-3.1-24B-Instruct-2503") >>> processor = AutoProcessor.from_pretrained("mistralai/Mistral-Small-3.1-24B-Instruct-2503") >>> prompt = "<s>[INST][IMG]What is the image?[/INST]" >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=15) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is the image?The image depicts two cats lying on a pink blanket." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return Mistral3CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) __all__ = [ "Mistral3Model", "Mistral3PreTrainedModel", # noqa "Mistral3ForConditionalGeneration", ]
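To make the tail of `get_image_features` concrete, here is a minimal sketch of the downsampling arithmetic and the `torch.split` call that turn the flat projector output back into per-image feature tensors. The `patch_size=14` and `spatial_merge_size=2` values are illustrative assumptions, not values read from any checkpoint:

```python
import torch

# Assumed toy values (hypothetical): patch_size=14, spatial_merge_size=2.
patch_size, spatial_merge_size = 14, 2
downsample_ratio = patch_size * spatial_merge_size  # 28

# (height, width) per image, already multiples of the downsample ratio.
image_sizes = [(56, 84), (28, 28)]
split_sizes = [(h // downsample_ratio) * (w // downsample_ratio) for h, w in image_sizes]
print(split_sizes)  # [6, 1] -> a 2x3 grid of merged patches, then a single merged patch

# The projector emits one flat token sequence over all images; split it back per image.
embed_dim = 8
flat_features = torch.randn(sum(split_sizes), embed_dim)
per_image = torch.split(flat_features, split_sizes)
print([t.shape for t in per_image])  # [torch.Size([6, 8]), torch.Size([1, 8])]
```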
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processor class for Mllama."""

from typing import Optional, Union

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, make_nested_list_of_images
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput


class MllamaImagesKwargs(ImagesKwargs, total=False):
    max_image_tiles: Optional[int]


class MllamaProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: MllamaImagesKwargs

    _defaults = {
        "images_kwargs": {
            "max_image_tiles": 4,
        },
    }


def get_cross_attention_token_mask(input_ids: list[int], image_token_id: int) -> list[list[int]]:
    """
    Generate a cross-attention token mask for image tokens in the input sequence.

    This function identifies the positions of image tokens in the input sequence and creates
    a mask that defines which subsequent tokens each image token should attend to.

    Args:
        input_ids (list[int]): A list of token ids representing the input sequence.
        image_token_id (int): The id of the token used to represent images in the sequence.

    Returns:
        list[list[int]]: A list of [start, end] pairs, where each pair represents the range
        of tokens an image token should attend to.

    Notes:
        - If no image tokens are present, an empty list is returned.
        - For a single image token, it attends to all subsequent tokens until the end of the sequence.
        - For multiple image tokens, each attends to tokens up to the next image token or the end of the sequence.
        - Consecutive image tokens are treated as a group and attend to all subsequent tokens together.
    """
    image_token_locations = [i for i, token in enumerate(input_ids) if token == image_token_id]

    if len(image_token_locations) == 0:
        return []

    # only one image present, unmask until end of sequence
    if len(image_token_locations) == 1:
        return [[image_token_locations[0], -1]]

    vision_masks = [[loc1, loc2] for loc1, loc2 in zip(image_token_locations[:-1], image_token_locations[1:])]

    # last image will attend to all subsequent text
    vision_masks.append([image_token_locations[-1], len(input_ids)])

    # if there are two or more consecutive vision tokens,
    # they should all attend to all subsequent text
    last_mask_end = vision_masks[-1][1]
    for vision_mask in vision_masks[::-1]:
        if vision_mask[0] == vision_mask[1] - 1:
            vision_mask[1] = last_mask_end
        last_mask_end = vision_mask[1]

    return vision_masks


def convert_sparse_cross_attention_mask_to_dense(
    cross_attention_token_mask: list[list[list[int]]],
    num_tiles: list[list[int]],
    max_num_tiles: int,
    length: int,
) -> np.ndarray:
    """
    Convert the cross attention mask indices to a cross attention mask 4D array.

    This function takes a sparse representation of cross attention masks and converts it to a dense 4D numpy array.
    The sparse representation is a nested list structure that defines attention ranges for each image in each batch item.
    Args:
        cross_attention_token_mask (list[list[list[int]]]): A nested list structure where:
            - The outer list represents the batch dimension.
            - The middle list represents different images within each batch item.
            - The inner list contains pairs of integers [start, end] representing token ranges for each image.
        num_tiles (list[list[int]]): A nested list structure specifying the number of tiles for each image in each batch item.
        max_num_tiles (int): The maximum possible number of tiles.
        length (int): The total sequence length of the input.

    Returns:
        np.ndarray: A 4D numpy array of shape (batch_size, length, max_num_images, max_num_tiles).
            The array contains `1` where attention is allowed and `0` where it is not.

    Note:
        - Special handling is done for cases where the end token is -1, which is interpreted as attending to the end
          of the sequence.
    """

    batch_size = len(cross_attention_token_mask)
    max_num_images = max([len(masks) for masks in cross_attention_token_mask])

    cross_attention_mask = np.zeros(
        shape=(batch_size, length, max_num_images, max_num_tiles),
        dtype=np.int64,
    )

    for sample_idx, (sample_masks, sample_num_tiles) in enumerate(zip(cross_attention_token_mask, num_tiles)):
        for mask_idx, (locations, mask_num_tiles) in enumerate(zip(sample_masks, sample_num_tiles)):
            if len(locations) == 2:
                start, end = locations
                end = min(end, length)
                if end == -1:
                    end = length
                cross_attention_mask[sample_idx, start:end, mask_idx, :mask_num_tiles] = 1
    return cross_attention_mask


def build_string_from_input(prompt: str, bos_token: str, image_token: str) -> str:
    """
    Builds a string from the input prompt by adding `bos_token` if not already present.

    Args:
        prompt (`str`): The input prompt string.
        bos_token (`str`): The beginning of sentence token to be added.
        image_token (`str`): The image token used to identify the start of an image sequence.

    Returns:
        str: The modified prompt string with the `bos_token` added if necessary.

    Examples:
        >>> build_string_from_input("Hello world", "<begin_of_text>", "<|image|>")
        '<begin_of_text>Hello world'

        >>> build_string_from_input("<|image|>Hello world", "<begin_of_text>", "<|image|>")
        '<|image|><begin_of_text>Hello world'

        >>> build_string_from_input("<begin_of_text>Hello world", "<begin_of_text>", "<|image|>")
        '<begin_of_text>Hello world'
    """
    if bos_token in prompt:
        return prompt

    num_image_tokens_on_start = 0
    while prompt.startswith(image_token):
        prompt = prompt[len(image_token) :]
        num_image_tokens_on_start += 1

    return f"{image_token * num_image_tokens_on_start}{bos_token}{prompt}"


class MllamaProcessor(ProcessorMixin):
    r"""
    Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and
    [`PreTrainedTokenizerFast`] into a single processor that inherits both the image processor and
    tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~MllamaProcessor.decode`] for more
    information.
    The preferred way of passing kwargs is as a dictionary per modality, see usage example below.
        ```python
        from transformers import MllamaProcessor
        from PIL import Image

        processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")

        processor(
            images=your_pil_image,
            text=["<|image|>If I had to write a haiku for this one"],
            images_kwargs = {"size": {"height": 448, "width": 448}},
            text_kwargs = {"padding": "right"},
            common_kwargs = {"return_tensors": "pt"},
        )
        ```

    Args:
        image_processor ([`MllamaImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
            The tokenizer is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "MllamaImageProcessor" tokenizer_class = "PreTrainedTokenizerFast" def __init__(self, image_processor, tokenizer, chat_template=None): if not hasattr(tokenizer, "image_token"): self.image_token = "<|image|>" self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) else: self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id self.python_token = "<|python_tag|>" self.python_token_id = tokenizer.convert_tokens_to_ids(self.python_token) self.bos_token = tokenizer.bos_token super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__( self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None, audio=None, videos=None, **kwargs: Unpack[MllamaProcessorKwargs], ) -> BatchFeature: """ Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` arguments to MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. 
            TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask
        """
        if text is None and images is None:
            raise ValueError("You must specify either text or images.")

        output_kwargs = self._merge_kwargs(
            MllamaProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        text_kwargs = output_kwargs["text_kwargs"]
        text_kwargs["return_tensors"] = None
        images_kwargs = output_kwargs["images_kwargs"]
        common_kwargs = output_kwargs["common_kwargs"]

        data = {}
        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            n_images_in_text = [t.count(self.image_token) for t in text]
            text = [build_string_from_input(text_item, self.bos_token, self.image_token) for text_item in text]
            _ = text_kwargs.pop("padding_side", None)  # hack until padding-side is an accepted kwarg by tokenizers
            encoding = self.tokenizer(text, **text_kwargs)
            self._check_special_mm_tokens(text, encoding, modalities=["image"])
            n_images_in_ids = [token_ids.count(self.image_token_id) for token_ids in encoding["input_ids"]]
            data.update(encoding)

        n_images_in_images = [0]
        if images is not None:
            images = make_nested_list_of_images(images)
            n_images_in_images = [len(sample) for sample in images]

        if text is not None:
            if any(batch_img == 0 for batch_img in n_images_in_text) and not all(
                batch_img == 0 for batch_img in n_images_in_text
            ):
                raise ValueError(
                    "If a batch of text is provided, there should be either no images or at least one image per sample"
                )
            if sum(n_images_in_text) > 0 and (
                n_images_in_images != n_images_in_text or n_images_in_ids != n_images_in_images
            ):
                if images is None:
                    raise ValueError("No images were provided, but there are image tokens in the prompt")
                else:
                    add_message = ""
                    if sum(n_images_in_images) == sum(n_images_in_text) and n_images_in_images != n_images_in_text:
                        add_message = "Make sure to pass your images as a nested list, where each sub-list holds images per batch"
                    elif n_images_in_ids != n_images_in_images:
                        add_message = "If you activated truncation with `max_length`, increase the `max_length` so image tokens aren't cropped."
                    raise ValueError(
                        f"The number of image tokens in each text ({n_images_in_text}) should be the same as the "
                        f"number of provided images per batch ({n_images_in_images}). {add_message}"
                    )

        if images is not None:
            image_features = self.image_processor(images, **images_kwargs)
            num_tiles = image_features.pop("num_tiles")
            data.update(image_features)

        # Create cross attention mask
        if images is not None and text is not None:
            cross_attention_token_mask = [
                get_cross_attention_token_mask(token_ids, self.image_token_id) for token_ids in encoding["input_ids"]
            ]
            cross_attention_mask = convert_sparse_cross_attention_mask_to_dense(
                cross_attention_token_mask,
                num_tiles=num_tiles,
                max_num_tiles=self.image_processor.max_image_tiles,
                length=max(len(input_ids) for input_ids in encoding["input_ids"]),
            )
            data["cross_attention_mask"] = cross_attention_mask

        return_tensors = common_kwargs.pop("return_tensors", None)
        batch_feature = BatchFeature(data=data, tensor_type=return_tensors)
        return batch_feature

    def post_process_image_text_to_text(
        self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
    ):
        """
        Post-process the output of the model to decode the text.

        Args:
            generated_outputs (`torch.Tensor` or `np.ndarray`):
                The output of the model `generate` function. The output is expected to be a tensor of shape
                `(batch_size, sequence_length)` or `(sequence_length,)`.
            skip_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to remove special tokens in the output. Argument passed to the tokenizer's
                `batch_decode` method.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's
                `batch_decode` method.
            **kwargs:
                Additional arguments to be passed to the tokenizer's `batch_decode` method.

        Returns:
            `list[str]`: The decoded text.
        """
        return self.tokenizer.batch_decode(
            generated_outputs,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Remove `num_tiles`, it is popped and used only when processing. Make a copy of list when removing
        # otherwise `self.image_processor.model_input_names` is also modified
        image_processor_input_names = [name for name in image_processor_input_names if name != "num_tiles"]
        return list(tokenizer_input_names + image_processor_input_names + ["cross_attention_mask"])


__all__ = ["MllamaProcessor"]
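Since both mask helpers above are plain functions, a toy walk-through may help make their output shapes concrete. This sketch assumes only that the module above is importable; the image token id `99` is a stand-in, not Mllama's real vocabulary id:

```python
from transformers.models.mllama.processing_mllama import (
    convert_sparse_cross_attention_mask_to_dense,
    get_cross_attention_token_mask,
)

IMG = 99  # stand-in image token id (hypothetical, not Mllama's actual id)
input_ids = [101, IMG, 17, 23, IMG, 42]

sparse = get_cross_attention_token_mask(input_ids, image_token_id=IMG)
print(sparse)  # [[1, 4], [4, 6]]: first image attends to tokens 1..3, second to tokens 4..5

dense = convert_sparse_cross_attention_mask_to_dense(
    [sparse],            # batch of one sample
    num_tiles=[[4, 2]],  # first image split into 4 tiles, second into 2
    max_num_tiles=4,
    length=len(input_ids),
)
print(dense.shape)  # (1, 6, 2, 4): (batch, seq_len, num_images, max_num_tiles)
print(dense[0, 2])  # token 2 attends to all 4 tiles of image 0, and to none of image 1
```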
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert MobileViT checkpoints from the ml-cvnets library.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_mobilevit_config(mobilevit_name): config = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: config.hidden_sizes = [144, 192, 240] config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: config.hidden_sizes = [96, 120, 144] config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: config.hidden_sizes = [64, 80, 96] config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320] config.hidden_dropout_prob = 0.05 config.expand_ratio = 2.0 if mobilevit_name.startswith("deeplabv3_"): config.image_size = 512 config.output_stride = 16 config.num_labels = 21 filename = "pascal-voc-id2label.json" else: config.num_labels = 1000 filename = "imagenet-1k-id2label.json" repo_id = "huggingface/label-files" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(name, base_model=False): for i in range(1, 6): if f"layer_{i}." in name: name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.") if "conv_1." in name: name = name.replace("conv_1.", "conv_stem.") if ".block." in name: name = name.replace(".block.", ".") if "exp_1x1" in name: name = name.replace("exp_1x1", "expand_1x1") if "red_1x1" in name: name = name.replace("red_1x1", "reduce_1x1") if ".local_rep.conv_3x3." in name: name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.") if ".local_rep.conv_1x1." in name: name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.") if ".norm." in name: name = name.replace(".norm.", ".normalization.") if ".conv." in name: name = name.replace(".conv.", ".convolution.") if ".conv_proj." in name: name = name.replace(".conv_proj.", ".conv_projection.") for i in range(0, 2): for j in range(0, 4): if f".{i}.{j}." in name: name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.") for i in range(2, 6): for j in range(0, 4): if f".{i}.{j}." 
in name: name = name.replace(f".{i}.{j}.", f".{i}.") if "expand_1x1" in name: name = name.replace("expand_1x1", "downsampling_layer.expand_1x1") if "conv_3x3" in name: name = name.replace("conv_3x3", "downsampling_layer.conv_3x3") if "reduce_1x1" in name: name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1") for i in range(2, 5): if f".global_rep.{i}.weight" in name: name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight") if f".global_rep.{i}.bias" in name: name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias") if ".global_rep." in name: name = name.replace(".global_rep.", ".transformer.") if ".pre_norm_mha.0." in name: name = name.replace(".pre_norm_mha.0.", ".layernorm_before.") if ".pre_norm_mha.1.out_proj." in name: name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.") if ".pre_norm_ffn.0." in name: name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.") if ".pre_norm_ffn.1." in name: name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.") if ".pre_norm_ffn.4." in name: name = name.replace(".pre_norm_ffn.4.", ".output.dense.") if ".transformer." in name: name = name.replace(".transformer.", ".transformer.layer.") if ".aspp_layer." in name: name = name.replace(".aspp_layer.", ".") if ".aspp_pool." in name: name = name.replace(".aspp_pool.", ".") if "seg_head." in name: name = name.replace("seg_head.", "segmentation_head.") if "segmentation_head.classifier.classifier." in name: name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.") if "classifier.fc." in name: name = name.replace("classifier.fc.", "classifier.") elif (not base_model) and ("segmentation_head." not in name): name = "mobilevit." + name return name def convert_state_dict(orig_state_dict, model, base_model=False): if base_model: model_prefix = "" else: model_prefix = "mobilevit." for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if key[:8] == "encoder.": key = key[8:] if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[0][6:]) - 1 transformer_num = int(key_split[3]) layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}") dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size prefix = ( f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention." ) if "weight" in key: orig_state_dict[prefix + "query.weight"] = val[:dim, :] orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :] orig_state_dict[prefix + "value.weight"] = val[-dim:, :] else: orig_state_dict[prefix + "query.bias"] = val[:dim] orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2] orig_state_dict[prefix + "value.bias"] = val[-dim:] else: orig_state_dict[rename_key(key, base_model)] = val return orig_state_dict # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our MobileViT structure. 
""" config = get_mobilevit_config(mobilevit_name) # load original state_dict state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True) # load 🤗 model if mobilevit_name.startswith("deeplabv3_"): model = MobileViTForSemanticSegmentation(config).eval() else: model = MobileViTForImageClassification(config).eval() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # Check outputs on an image, prepared by MobileViTImageProcessor image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32) encoding = image_processor(images=prepare_img(), return_tensors="pt") outputs = model(**encoding) logits = outputs.logits if mobilevit_name.startswith("deeplabv3_"): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": expected_logits = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": expected_logits = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": expected_logits = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ] ) else: raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}") assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}") assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model_mapping = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub...") model_name = model_mapping[mobilevit_name] image_processor.push_to_hub(model_name, organization="apple") model.push_to_hub(model_name, organization="apple") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--mobilevit_name", default="mobilevit_s", type=str, help=( "Name of the MobileViT model you'd like to convert. 
Should be one of 'mobilevit_s', 'mobilevit_xs'," " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'." ), ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
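The `qkv` branch of `convert_state_dict` above splits one fused projection matrix of shape `(3 * dim, hidden)` into the separate query/key/value weights the Hugging Face model expects. A minimal sketch of that slicing with toy dimensions (the `dim=4`, `hidden=4` values are arbitrary and stand in for `all_head_size` and the hidden size):

```python
import torch

dim, hidden = 4, 4  # toy stand-ins for all_head_size and hidden_size
fused = torch.arange(3 * dim * hidden, dtype=torch.float32).reshape(3 * dim, hidden)

query_w = fused[:dim, :]           # first `dim` rows
key_w = fused[dim : dim * 2, :]    # middle `dim` rows
value_w = fused[-dim:, :]          # last `dim` rows

# Re-stacking the three slices recovers the original fused matrix.
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), fused)
print(query_w.shape, key_w.shape, value_w.shape)  # torch.Size([4, 4]) x 3
```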
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_modernbert_decoder.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2025 Johns Hopkins University, LightOn, and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Callable from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...models.modernbert.modeling_modernbert import ( ModernBertEmbeddings, ModernBertMLP, ModernBertPredictionHead, ModernBertPreTrainedModel, ModernBertRotaryEmbedding, apply_rotary_pos_emb, ) from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from .configuration_modernbert_decoder import ModernBertDecoderConfig logger = logging.get_logger(__name__) def eager_attention_forward( module: "ModernBertDecoderAttention", query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], dropout: float = 0.0, scaling: Optional[float] = None, sliding_window: Optional[int] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """A simple eager attention implementation for ModernBERT decoder.""" if scaling is None: scaling = module.head_dim**-0.5 # Compute attention scores attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling # Use the pre-computed attention mask causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class ModernBertDecoderAttention(nn.Module): """Performs causal multi-headed self attention for ModernBERT decoder. It supports both local attention (sliding window) and global attention patterns. 
""" def __init__(self, config: ModernBertDecoderConfig, layer_idx: Optional[int] = None): super().__init__() self.is_sliding = config.layer_types[layer_idx] == "sliding_attention" self.config = config self.layer_idx = layer_idx self.head_dim = config.hidden_size // config.num_attention_heads self.num_heads = config.num_attention_heads self.all_head_size = self.head_dim * self.num_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = self.config.attention_dropout self.is_causal = True if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) # NOTE: this is different than ModernBERT (separated QKV) so be sure to adapt to this self.q_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias) self.k_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias) self.v_proj = nn.Linear(self.config.hidden_size, self.all_head_size, bias=self.config.attention_bias) self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.out_drop = nn.Dropout(config.attention_dropout) self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: torch.Tensor, attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=self.attention_dropout if self.training else 0.0, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.out_drop(self.Wo(attn_output)) return attn_output, attn_weights class ModernBertDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: ModernBertDecoderConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx self.attention_type = config.layer_types[layer_idx] self.attn_norm = ( nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) if layer_idx != 0 else nn.Identity() ) self.attn = ModernBertDecoderAttention(config=config, layer_idx=layer_idx) self.mlp_norm = nn.LayerNorm(config.hidden_size, 
eps=config.norm_eps, bias=config.norm_bias) self.mlp = ModernBertMLP(config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings_global: torch.Tensor, position_embeddings_local: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.attn_norm(hidden_states) # apply global RoPE to non-sliding layer only if self.attn.is_sliding: position_embeddings = position_embeddings_local else: position_embeddings = position_embeddings_global # Self Attention attn_outputs = self.attn( hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) hidden_states = attn_outputs[0] # Add residual connection hidden_states = residual + hidden_states # MLP residual = hidden_states hidden_states = self.mlp_norm(hidden_states) mlp_output = self.mlp(hidden_states) hidden_states = residual + mlp_output return hidden_states @auto_docstring class ModernBertDecoderPreTrainedModel(ModernBertPreTrainedModel): config: ModernBertDecoderConfig _skip_keys_device_placement = ["past_key_values"] _no_split_modules = ["ModernBertDecoderLayer"] _can_compile_fullgraph = False _supports_attention_backend = True _can_record_outputs = { "hidden_states": ModernBertDecoderLayer, "attentions": ModernBertDecoderAttention, } def _init_weights(self, module: nn.Module): cutoff_factor = self.config.initializer_cutoff_factor if cutoff_factor is None: cutoff_factor = 3 def init_weight(module: nn.Module, std: float): nn.init.trunc_normal_( module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std, ) if isinstance(module, nn.Linear): if module.bias is not None: nn.init.zeros_(module.bias) stds = { "in": self.config.initializer_range, "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), "embedding": self.config.initializer_range, "final_out": self.config.hidden_size**-0.5, } if isinstance(module, ModernBertEmbeddings): init_weight(module.tok_embeddings, stds["embedding"]) elif isinstance(module, ModernBertMLP): init_weight(module.Wi, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertDecoderAttention): init_weight(module.q_proj, stds["in"]) init_weight(module.k_proj, stds["in"]) init_weight(module.v_proj, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertPredictionHead): init_weight(module.dense, stds["out"]) elif isinstance(module, ModernBertDecoderForSequenceClassification): init_weight(module.classifier, stds["final_out"]) elif isinstance(module, ModernBertDecoderForCausalLM): init_weight(module.decoder, stds["out"]) elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) if module.bias is not None: module.bias.data.zero_() def _check_and_adjust_attn_implementation( self, attn_implementation: Optional[str], is_init_check: bool = False ) -> str: """We overwrite this to make sdpa the first selection again if nothing was requested.""" try: attn_implementation = ( "sdpa" if attn_implementation is None and self._sdpa_can_dispatch() else attn_implementation ) except (ValueError, ImportError): pass return 
super()._check_and_adjust_attn_implementation(attn_implementation, is_init_check) @auto_docstring class ModernBertDecoderModel(ModernBertDecoderPreTrainedModel): def __init__(self, config: ModernBertDecoderConfig): super().__init__(config) self.config = config self.embeddings = ModernBertEmbeddings(config) self.layers = nn.ModuleList( [ModernBertDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.gradient_checkpointing = False self.global_rotary_emb = ModernBertRotaryEmbedding(config=config) self.local_rotary_emb = ModernBertRotaryEmbedding(config=config) self.post_init() def get_input_embeddings(self): return self.embeddings.tok_embeddings def set_input_embeddings(self, value): self.embeddings.tok_embeddings = value @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPast]: if (input_ids is None) == (inputs_embeds is None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) batch_size, seq_length = input_ids.shape[:2] else: batch_size, seq_length = inputs_embeds.shape[:2] # Handle past_key_values and cache setup if use_cache and past_key_values is None and not self.training: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + seq_length, device=input_ids.device if input_ids is not None else inputs_embeds.device, ) if position_ids is None: position_ids = cache_position.unsqueeze(0).expand(batch_size, -1) # Calculate embeddings hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds) # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": hidden_states, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs), } # create position embeddings to be shared across the decoder layers position_embeddings_global = self.global_rotary_emb(hidden_states, position_ids) position_embeddings_local = self.local_rotary_emb(hidden_states, position_ids) for idx, decoder_layer in enumerate(self.layers): hidden_states = decoder_layer( hidden_states, position_embeddings_global=position_embeddings_global, position_embeddings_local=position_embeddings_local, attention_mask=causal_mask_mapping[decoder_layer.attention_type], past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.final_norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) @auto_docstring( custom_intro=""" The ModernBert Decoder Model with a language modeling head on top for causal language modeling (CLM). """ ) class ModernBertDecoderForCausalLM(ModernBertDecoderPreTrainedModel, GenerationMixin): _tied_weights_keys = ["decoder.weight"] def __init__(self, config: ModernBertDecoderConfig): super().__init__(config) self.config = config self.model = ModernBertDecoderModel(config) self.lm_head = ModernBertPredictionHead(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, new_embeddings): self.decoder = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs, ) -> Union[tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: [`~modeling_outputs.CausalLMOutputWithPast`] comprising various elements depending on the configuration and inputs. 
        Example:

        ```python
        >>> from transformers import AutoTokenizer, ModernBertDecoderForCausalLM

        >>> model = ModernBertDecoderForCausalLM.from_pretrained("blab-jhu/test-32m-dec")
        >>> tokenizer = AutoTokenizer.from_pretrained("blab-jhu/test-32m-dec")

        >>> prompt = "The capital of France is"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=10)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "The capital of France is Paris"
        ```
        """
        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            **kwargs,
        )

        hidden_states = outputs[0]
        logits = self.decoder(self.lm_head(hidden_states))

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The ModernBert Decoder Model with a sequence classification head on top (linear layer).

    [`ModernBertDecoderForSequenceClassification`] uses the last token in order to do the classification, as other
    causal models (e.g. GPT-1, GPT-2) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row.
    If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess
    the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value
    in each row of the batch).
    """
)
class ModernBertDecoderForSequenceClassification(ModernBertDecoderPreTrainedModel):
    def __init__(self, config: ModernBertDecoderConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = ModernBertDecoderModel(config)
        self.head = ModernBertPredictionHead(config)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels, bias=config.classifier_bias)
        self.drop = torch.nn.Dropout(config.classifier_dropout)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring(checkpoint="blab-jhu/test-32m-dec")
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        **kwargs,
    ) -> Union[tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`.
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs, ) hidden_states = transformer_outputs[0] hidden_states = self.drop(self.head(hidden_states)) logits = self.classifier(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "ModernBertDecoderModel", "ModernBertDecoderPreTrainedModel", "ModernBertDecoderForCausalLM", "ModernBertDecoderForSequenceClassification", ]
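The pooling trick in `ModernBertDecoderForSequenceClassification.forward` deserves a small illustration: multiplying the token indices by the non-pad mask zeroes out pad positions, so `argmax` returns the rightmost non-pad index regardless of padding side. A self-contained sketch with toy ids (`pad_token_id=0` is an assumption for the example):

```python
import torch

pad_token_id = 0
input_ids = torch.tensor(
    [
        [5, 6, 7, 0, 0],  # right-padded: last real token at index 2
        [0, 0, 5, 6, 7],  # left-padded: last real token at index 4
    ]
)

non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
# Pad positions become 0 after the multiply, so argmax picks the largest non-pad index.
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 4], dtype=torch.int32)

batch_size = input_ids.shape[0]
logits = torch.randn(batch_size, input_ids.shape[-1], 3)  # (batch, seq, num_labels)
pooled_logits = logits[torch.arange(batch_size), last_non_pad_token]
print(pooled_logits.shape)  # torch.Size([2, 3])
```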
# coding=utf-8
# Copyright 2023 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MusicGen model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


logger = logging.get_logger(__name__)


class MusicgenDecoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MusicgenDecoder`]. It is used to instantiate a
    MusicGen decoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the MusicGen
    [facebook/musicgen-small](https://huggingface.co/facebook/musicgen-small) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 2048):
            Vocabulary size of the MusicgenDecoder model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`MusicgenDecoder`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of decoder layers.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer block.
        ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically, set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        initializer_factor (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop
            paper](https://huggingface.co/papers/1909.11556) for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Whether to scale embeddings by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models).
        num_codebooks (`int`, *optional*, defaults to 4):
            The number of parallel codebooks forwarded to the model.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether input and output word embeddings should be tied.
        audio_channels (`int`, *optional*, defaults to 1):
            Number of channels in the audio data. Either 1 for mono or 2 for stereo. Stereo models generate a
            separate audio stream for the left/right output channels. Mono models generate a single audio stream
            output.
    """

    model_type = "musicgen_decoder"
    base_config_key = "decoder_config"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=2048,
        max_position_embeddings=2048,
        num_hidden_layers=24,
        ffn_dim=4096,
        num_attention_heads=16,
        layerdrop=0.0,
        use_cache=True,
        activation_function="gelu",
        hidden_size=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        initializer_factor=0.02,
        scale_embedding=False,
        num_codebooks=4,
        audio_channels=1,
        pad_token_id=2048,
        bos_token_id=2048,
        eos_token_id=None,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.initializer_factor = initializer_factor
        self.layerdrop = layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.num_codebooks = num_codebooks

        if audio_channels not in [1, 2]:
            raise ValueError(f"Expected 1 (mono) or 2 (stereo) audio channels, got {audio_channels} channels.")
        self.audio_channels = audio_channels

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


class MusicgenConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MusicgenModel`]. It is used to instantiate a
    MusicGen model according to the specified arguments, defining the text encoder, audio encoder and MusicGen decoder
    configs.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        kwargs (*optional*):
            Dictionary of keyword arguments. Notably:

                - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the text encoder config.
                - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
                  defines the audio encoder config.
                - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
                  the decoder config.

    Example:

    ```python
    >>> from transformers import (
    ...     MusicgenConfig,
    ...     MusicgenDecoderConfig,
    ...     T5Config,
    ...     EncodecConfig,
    ...     MusicgenForConditionalGeneration,
    ... )

    >>> # Initializing text encoder, audio encoder, and decoder model configurations
    >>> text_encoder_config = T5Config()
    >>> audio_encoder_config = EncodecConfig()
    >>> decoder_config = MusicgenDecoderConfig()

    >>> configuration = MusicgenConfig.from_sub_models_config(
    ...     text_encoder_config, audio_encoder_config, decoder_config
    ...
) >>> # Initializing a MusicgenForConditionalGeneration (with random weights) from the facebook/musicgen-small style configuration >>> model = MusicgenForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> config_text_encoder = model.config.text_encoder >>> config_audio_encoder = model.config.audio_encoder >>> config_decoder = model.config.decoder >>> # Saving the model, including its configuration >>> model.save_pretrained("musicgen-model") >>> # loading model and config from pretrained folder >>> musicgen_config = MusicgenConfig.from_pretrained("musicgen-model") >>> model = MusicgenForConditionalGeneration.from_pretrained("musicgen-model", config=musicgen_config) ```""" model_type = "musicgen" sub_configs = { "text_encoder": AutoConfig, "audio_encoder": AutoConfig, "decoder": MusicgenDecoderConfig, } has_no_defaults_at_init = True def __init__(self, **kwargs): super().__init__(**kwargs) if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs: raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config") text_encoder_config = kwargs.pop("text_encoder") text_encoder_model_type = text_encoder_config.pop("model_type") audio_encoder_config = kwargs.pop("audio_encoder") audio_encoder_model_type = audio_encoder_config.pop("model_type") decoder_config = kwargs.pop("decoder") self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config) self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config) self.decoder = MusicgenDecoderConfig(**decoder_config) self.is_encoder_decoder = True self.initializer_factor = self.decoder.initializer_factor @classmethod def from_sub_models_config( cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenDecoderConfig, **kwargs, ): r""" Instantiate a [`MusicgenConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenConfig`]: An instance of a configuration object """ return cls( text_encoder=text_encoder_config.to_dict(), audio_encoder=audio_encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs, ) @property # This is a property because you might want to change the codec model on the fly def sampling_rate(self): return self.audio_encoder.sampling_rate __all__ = ["MusicgenConfig", "MusicgenDecoderConfig"]
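As a quick sanity check of the composite configuration above, the sketch below builds a `MusicgenConfig` from default sub-configs and exercises the `audio_channels` validation. It assumes a transformers installation where these classes are exported (the class docstring imports the same names); the `24000` in the comment is just the Encodec default sampling rate:

```python
from transformers import EncodecConfig, MusicgenConfig, MusicgenDecoderConfig, T5Config

# Compose the three sub-configs into a single MusicgenConfig.
config = MusicgenConfig.from_sub_models_config(
    T5Config(), EncodecConfig(), MusicgenDecoderConfig(num_codebooks=4, audio_channels=2)
)
print(config.decoder.num_codebooks)  # 4
print(config.sampling_rate)          # proxied from the audio encoder (24000 with Encodec defaults)

# Invalid channel counts are rejected at construction time.
try:
    MusicgenDecoderConfig(audio_channels=3)
except ValueError as err:
    print(err)  # Expected 1 (mono) or 2 (stereo) audio channels, got 3 channels.
```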
transformers/src/transformers/models/musicgen/configuration_musicgen.py/0
{ "file_path": "transformers/src/transformers/models/musicgen/configuration_musicgen.py", "repo_id": "transformers", "token_count": 4109 }
496
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for Nougat.""" from typing import Optional, Union from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import ( BaseImageProcessorFast, DefaultFastImageProcessorKwargs, group_images_by_shape, reorder_images, ) from ...image_transforms import ( get_resize_output_image_size, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_available(): if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F else: from torchvision.transforms import functional as F class NougatFastImageProcessorKwargs(DefaultFastImageProcessorKwargs): """ Args: do_crop_margin (`bool`, *optional*, defaults to `True`): Whether to crop the image margins. do_thumbnail (`bool`, *optional*, defaults to `True`): Whether to resize the image using thumbnail method. do_align_long_axis (`bool`, *optional*, defaults to `False`): Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the images to the largest image size in the batch. 
""" do_crop_margin: Optional[bool] do_thumbnail: Optional[bool] do_align_long_axis: Optional[bool] do_pad: Optional[bool] @auto_docstring class NougatImageProcessorFast(BaseImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD size = {"height": 896, "width": 672} do_resize: bool = (True,) do_normalize: bool = True do_thumbnail: bool = True do_align_long_axis: bool = False do_pad: bool = True do_rescale = True do_crop_margin: bool = True valid_kwargs = NougatFastImageProcessorKwargs def __init__(self, **kwargs: Unpack[NougatFastImageProcessorKwargs]): super().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[NougatFastImageProcessorKwargs]) -> BatchFeature: return super().preprocess(images, **kwargs) def python_find_non_zero( self, image: "torch.Tensor", ): """This is a reimplementation of a findNonZero function equivalent to cv2.""" non_zero_indices = torch.nonzero(image, as_tuple=False) idxvec = non_zero_indices[:, [2, 1]] idxvec = idxvec.reshape(-1, 1, 2) return idxvec def python_bounding_rect(self, coordinates): """This is a reimplementation of a BoundingRect function equivalent to cv2.""" min_values = torch.amin(coordinates, axis=(0, 1)).to(torch.int) max_values = torch.amax(coordinates, axis=(0, 1)).to(torch.int) x_min, y_min = min_values[0], min_values[1] width = max_values[0] - x_min + 1 height = max_values[1] - y_min + 1 return x_min, y_min, width, height def crop_margin( self, image: "torch.Tensor", gray_threshold: int = 200, ) -> "torch.Tensor": """ Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). Args: image (`torch.Tensor`): The image to be cropped. gray_threshold (`int`, *optional*, defaults to `200`) Value below which pixels are considered to be gray. """ data = F.rgb_to_grayscale(image, num_output_channels=1) max_val = torch.max(data) min_val = torch.min(data) if max_val == min_val: return image data = (data - min_val) / (max_val - min_val) * 255 gray = data < gray_threshold coords = self.python_find_non_zero(gray) x_min, y_min, width, height = self.python_bounding_rect(coords) image = image[:, y_min : y_min + height, x_min : x_min + width] return image def align_long_axis( self, image: "torch.Tensor", size: SizeDict, ) -> "torch.Tensor": """ Align the long axis of the image to the longest axis of the specified size. Args: image (`torch.Tensor`): The image to be aligned. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to align the long axis to. Returns: `torch.Tensor`: The aligned image. """ input_height, input_width = image.shape[-2:] output_height, output_width = size.height, size.width if (output_width < output_height and input_width > input_height) or ( output_width > output_height and input_width < input_height ): image = torch.rot90(image, 3, dims=[1, 2]) return image def thumbnail( self, image: "torch.Tensor", size: SizeDict, ) -> "torch.Tensor": """ Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any corresponding dimension of the specified size. Args: image (`torch.tensor`): The image to be resized. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to resize the image to. """ input_height, input_width = image.shape[-2:] output_height, output_width = size.height, size.width # We always resize to the smallest of either the input or output size. 
height = min(input_height, output_height) width = min(input_width, output_width) if height == input_height and width == input_width: return image if input_height > input_width: width = int(input_width * height / input_height) elif input_width > input_height: height = int(input_height * width / input_width) new_size = (height, width) return F.resize(image, new_size, interpolation=F.InterpolationMode.BICUBIC) def pad_images( self, image: "torch.Tensor", size: SizeDict, ) -> "torch.Tensor": """ Pads a batch of images to the specified size at the top, bottom, left and right. Args: image (`torch.tensor`): The image to be padded. size (`Dict[str, int]`): The size `{"height": h, "width": w}` to pad the image to. """ input_height, input_width = image.shape[-2:] output_height, output_width = size.height, size.width delta_width = output_width - input_width delta_height = output_height - input_height pad_top = delta_height // 2 pad_left = delta_width // 2 pad_bottom = delta_height - pad_top pad_right = delta_width - pad_left padding = (pad_left, pad_top, pad_right, pad_bottom) return F.pad(image, padding) def resize( self, image: "torch.Tensor", size: SizeDict, interpolation: "F.InterpolationMode" = None, antialias: bool = True, **kwargs, ) -> "torch.Tensor": """ Resize an image to `(size["height"], size["width"])`. Args: image (`torch.Tensor`): Image to resize. size (`SizeDict`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`): `InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`. Returns: `torch.Tensor`: The resized image. """ interpolation = interpolation if interpolation is not None else F.InterpolationMode.BICUBIC shortest_edge = min(size["height"], size["width"]) new_size = get_resize_output_image_size( image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST ) return F.resize(image, new_size, interpolation=interpolation, antialias=antialias) def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, do_align_long_axis: bool, do_thumbnail: bool, do_pad: bool, interpolation: Optional["F.InterpolationMode"], do_center_crop: bool, crop_size: SizeDict, do_rescale: bool, rescale_factor: float, do_normalize: bool, do_crop_margin: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: bool, return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Crop images images = [self.crop_margin(image) for image in images] # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_align_long_axis: stacked_images = self.align_long_axis(image=stacked_images, size=size) if do_resize: stacked_images = self.resize(image=stacked_images, size=size) if do_thumbnail: stacked_images = self.thumbnail(image=stacked_images, size=size) if do_pad: stacked_images = self.pad_images(image=stacked_images, size=size) resized_images_grouped[shape] = stacked_images resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = 
group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["NougatImageProcessorFast"]
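A minimal usage sketch for the fast processor above, assuming the public `facebook/nougat-base` checkpoint and an installed `torch`/`torchvision`; the blank image is a stand-in for a scanned page:

```python
from PIL import Image

from transformers import NougatImageProcessorFast

processor = NougatImageProcessorFast.from_pretrained("facebook/nougat-base")
page = Image.new("RGB", (1275, 1650), color="white")  # placeholder document page

batch = processor(images=page, return_tensors="pt")
# After crop_margin / resize / thumbnail / pad, every image lands at the configured size
print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 896, 672])
```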
transformers/src/transformers/models/nougat/image_processing_nougat_fast.py/0
{ "file_path": "transformers/src/transformers/models/nougat/image_processing_nougat_fast.py", "repo_id": "transformers", "token_count": 4962 }
497
from typing import Callable, Optional import torch import torch.nn as nn from transformers.utils.generic import TransformersKwargs from ...cache_utils import Cache from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import logging from ...utils.deprecation import deprecate_kwarg from ..llama.modeling_llama import LlamaPreTrainedModel, LlamaRMSNorm, eager_attention_forward from ..olmo.configuration_olmo import OlmoConfig from ..olmo.modeling_olmo import ( OlmoAttention, OlmoDecoderLayer, OlmoForCausalLM, OlmoModel, OlmoRotaryEmbedding, apply_rotary_pos_emb, ) logger = logging.get_logger(__name__) class Olmo2Config(OlmoConfig): r""" This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50304): Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Olmo2Model`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*, defaults to 1): Padding token id. bos_token_id (`int`, *optional*): Beginning of stream token id. eos_token_id (`int`, *optional*, defaults to 50279): End of stream token id. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. 
rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for more information on how these scaling strategies behave: https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. ```python >>> from transformers import Olmo2Model, Olmo2Config >>> # Initializing an Olmo2 7B style configuration >>> configuration = Olmo2Config() >>> # Initializing a model from the Olmo2 7B style configuration >>> model = Olmo2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "olmo2" base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.k_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.v_proj": "colwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.self_attn.o_proj": "rowwise_rep", # we need to replicate here due to the added norm on q and k "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=50304, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, use_cache=True, pad_token_id=1, bos_token_id=None, eos_token_id=50279, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, rms_norm_eps=1e-5, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, hidden_act=hidden_act, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, use_cache=use_cache, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, rope_theta=rope_theta, rope_scaling=rope_scaling, attention_bias=attention_bias, attention_dropout=attention_dropout, **kwargs, ) self.rms_norm_eps = rms_norm_eps del self.clip_qkv # OLMo2 RMS norm is identical to Llama RMS norm except: # - Weight and hidden states are multiplied before converting back to the input dtype, rather than after.
class Olmo2RMSNorm(LlamaRMSNorm): def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return (self.weight * hidden_states).to(input_dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Olmo2 attention is identical to OLMo attention except: # - Norm is applied to attention queries and keys. # - No qkv clipping. class Olmo2Attention(OlmoAttention): def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None): super().__init__(config, layer_idx=layer_idx) self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps) self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_norm(self.q_proj(hidden_states)) key_states = self.k_norm(self.k_proj(hidden_states)) value_states = self.v_proj(hidden_states) query_states = query_states.view(hidden_shape).transpose(1, 2) key_states = key_states.view(hidden_shape).transpose(1, 2) value_states = value_states.view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights # The OLMo2 layers are identical to those of the OLMo model except: # - RMSNorm is used instead of standard layer norm. # - Norm is applied after attention/feedforward rather than before. 
class Olmo2DecoderLayer(OlmoDecoderLayer): def __init__(self, config: Olmo2Config, layer_idx: int): super().__init__(config, layer_idx=layer_idx) self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx) del self.input_layernorm @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: residual = hidden_states hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = self.post_feedforward_layernorm(hidden_states) hidden_states = residual + hidden_states return hidden_states class Olmo2RotaryEmbedding(OlmoRotaryEmbedding): pass class Olmo2PreTrainedModel(LlamaPreTrainedModel): pass # The OLMo2 model is identical to the OLMo model, except RMSNorm is used instead of # standard layer norm for the output norm. class Olmo2Model(OlmoModel): def __init__(self, config: Olmo2Config): super().__init__(config) self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.layers = nn.ModuleList( [Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) # The heads now only need to redefine the model inside to the correct `Olmo2Model` class Olmo2ForCausalLM(OlmoForCausalLM): pass __all__ = [ "Olmo2Config", "Olmo2ForCausalLM", "Olmo2Model", "Olmo2PreTrainedModel", # noqa: F822 ]
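A small numerical sketch (shapes and values are illustrative) of the ordering difference called out in the `Olmo2RMSNorm` comment above: OLMo2 applies the learned scale while still in float32 and only then casts back, whereas the Llama-style norm casts the normalized states to the input dtype before scaling:

```python
import torch

hidden = torch.randn(2, 4, 8, dtype=torch.bfloat16)
weight = torch.randn(8, dtype=torch.float32)

x = hidden.to(torch.float32)
normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-5)

llama_out = weight.to(torch.bfloat16) * normed.to(torch.bfloat16)  # cast, then scale
olmo2_out = (weight * normed).to(torch.bfloat16)                   # scale, then cast

# The two orderings agree up to bfloat16 rounding but are not bit-identical in general
print((llama_out.float() - olmo2_out.float()).abs().max())
```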
transformers/src/transformers/models/olmo2/modular_olmo2.py/0
{ "file_path": "transformers/src/transformers/models/olmo2/modular_olmo2.py", "repo_id": "transformers", "token_count": 5813 }
498
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for OneFormer """ from ...processing_utils import ProcessorMixin from ...utils import is_torch_available if is_torch_available(): import torch class OneFormerProcessor(ProcessorMixin): r""" Constructs a OneFormer processor which wraps [`OneFormerImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. Args: image_processor ([`OneFormerImageProcessor`]): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): The tokenizer is a required input. max_seq_length (`int`, *optional*, defaults to 77): Sequence length for input text list. task_seq_length (`int`, *optional*, defaults to 77): Sequence length for input task token. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "OneFormerImageProcessor" tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs ): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") self.max_seq_length = max_seq_length self.task_seq_length = task_seq_length super().__init__(image_processor, tokenizer) def _preprocess_text(self, text_list=None, max_length=77): if text_list is None: raise ValueError("tokens cannot be None.") tokens = self.tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True) attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"] token_inputs = [] for attn_mask, input_id in zip(attention_masks, input_ids): token = torch.tensor(attn_mask) * torch.tensor(input_id) token_inputs.append(token.unsqueeze(0)) token_inputs = torch.cat(token_inputs, dim=0) return token_inputs def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs): """ Main method to prepare for the model one or several task input(s) and image(s). This method forwards the `task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not `None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: task_inputs (`str`, `list[str]`): The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list of strings of the template "the task is {task}". images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor.
Both channels-first and channels-last formats are supported. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if task_inputs is None: raise ValueError("You have to specify the task_input. Found None.") elif images is None: raise ValueError("You have to specify the image. Found None.") if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs): raise ValueError("task_inputs must be semantic, instance, or panoptic.") encoded_inputs = self.image_processor(images, task_inputs, segmentation_maps, **kwargs) if isinstance(task_inputs, str): task_inputs = [task_inputs] if isinstance(task_inputs, list) and all(isinstance(task_input, str) for task_input in task_inputs): task_token_inputs = [] for task in task_inputs: task_input = f"the task is {task}" task_token_inputs.append(task_input) encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length) else: raise TypeError("Task Inputs should be a string or a list of strings.") if hasattr(encoded_inputs, "text_inputs"): texts_list = encoded_inputs.text_inputs text_inputs = [] for texts in texts_list: text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length) text_inputs.append(text_input_list.unsqueeze(0)) encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0) return encoded_inputs def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs): """ This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the task_inputs. Please refer to the docstring of this method for more information. """ if task_inputs is None: raise ValueError("You have to specify the task_input. Found None.") elif images is None: raise ValueError("You have to specify the image. 
Found None.") if not all(task in ["semantic", "instance", "panoptic"] for task in task_inputs): raise ValueError("task_inputs must be semantic, instance, or panoptic.") encoded_inputs = self.image_processor.encode_inputs(images, task_inputs, segmentation_maps, **kwargs) if isinstance(task_inputs, str): task_inputs = [task_inputs] if isinstance(task_inputs, list) and all(isinstance(task_input, str) for task_input in task_inputs): task_token_inputs = [] for task in task_inputs: task_input = f"the task is {task}" task_token_inputs.append(task_input) encoded_inputs["task_inputs"] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length) else: raise TypeError("Task Inputs should be a string or a list of strings.") if hasattr(encoded_inputs, "text_inputs"): texts_list = encoded_inputs.text_inputs text_inputs = [] for texts in texts_list: text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length) text_inputs.append(text_input_list.unsqueeze(0)) encoded_inputs["text_inputs"] = torch.cat(text_inputs, dim=0) return encoded_inputs def post_process_semantic_segmentation(self, *args, **kwargs): """ This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`]. Please refer to the docstring of this method for more information. """ return self.image_processor.post_process_semantic_segmentation(*args, **kwargs) def post_process_instance_segmentation(self, *args, **kwargs): """ This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`]. Please refer to the docstring of this method for more information. """ return self.image_processor.post_process_instance_segmentation(*args, **kwargs) def post_process_panoptic_segmentation(self, *args, **kwargs): """ This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`]. Please refer to the docstring of this method for more information. """ return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs) __all__ = ["OneFormerProcessor"]
transformers/src/transformers/models/oneformer/processing_oneformer.py/0
{ "file_path": "transformers/src/transformers/models/oneformer/processing_oneformer.py", "repo_id": "transformers", "token_count": 3698 }
499
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import re import requests import torch from PIL import Image from transformers import ( AutoModelForCausalLM, AutoModelForImageTextToText, AutoProcessor, AutoTokenizer, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES from transformers.models.ovis2.configuration_ovis2 import Ovis2Config, Ovis2VisionConfig from transformers.models.ovis2.image_processing_ovis2 import Ovis2ImageProcessor from transformers.models.ovis2.modeling_ovis2 import Ovis2ForConditionalGeneration from transformers.models.ovis2.processing_ovis2 import Ovis2Processor from transformers.models.qwen2.configuration_qwen2 import Qwen2Config # Constants CONTEXT_LENGTH = 32768 # multimodal_max_length # fmt: off # Mapping from original model key patterns to HF key patterns ORIGINAL_TO_HF_MAPPING = { r"trunk.blocks\.(\d+)\.norm_1": r"encoder.layers.\1.rms_norm1", r"trunk.blocks\.(\d+)\.norm_2": r"encoder.layers.\1.rms_norm2", r"trunk.blocks\.(\d+)\.attn.proj": r"encoder.layers.\1.attention.out_proj", r"visual_tokenizer": r"model.vision_tower", r"backbone": r"transformer", r"preprocessor": r"embeddings", r"patchifier.proj": r"patch_embedding", r"patchifier.norm": r"rms_norm", r"trunk.post_trunk_norm": r"rms_norm", r"trunk.blocks": r"encoder.layers", r"mlp.fc1": r"ffn.gate_proj", r"mlp.fc2": r"ffn.down_proj", r"mlp.fc3": r"ffn.up_proj", r"head.0": r"head_linear", r"head.1": r"head_norm", r"vte.weight": r"model.visual_embeddings_table.weight", r"llm.model": r"model.language_model", r"llm.lm_head": r"lm_head", } # fmt: on # Special tokens for the tokenizer SPECIAL_TOKENS = [ "<IMG_ATOM>", "<IMG_START>", "<IMG_GRID>", "<IMG_COL>", "<IMG_ROW>", "<IMG_END>", ] # Configuration keys to ignore when converting UNNECESSARY_CONFIG_KEYS = [ "_name_or_path", "_attn_implementation_autoset", "auto_map", "use_bfloat16", "use_flash_attn", "qk_normalization", "bias", "norm_type", ] # Chat template for the tokenizer CHAT_TEMPLATE = ( "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" "{% for message in messages %}" "{{'<|im_start|>' + message['role'] + '\n'}}" "{% if message['content'] is string %}" "{{ message['content'] }}" "{% else %}" "{% for content in message['content'] %}" "{% if content['type'] == 'image' %}" "{{ '<image>\n' }}" "{% elif content['type'] == 'text' %}" "{{ content['text'] }}" "{% endif %}" "{% endfor %}" "{% endif %}" "{{'<|im_end|>\n'}}" "{% endfor %}" "{% if add_generation_prompt %}" "{{'<|im_start|>assistant\n' }}" "{% endif %}" ) def create_tokenizer(model_name_or_path, save_dir): """ Create and configure a tokenizer for the Ovis2 model. 
Args: model_name_or_path: Path to the source model or tokenizer save_dir: Directory to save the tokenizer to Returns: The configured tokenizer """ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, return_token_type_ids=False) tokenizer.model_max_length = CONTEXT_LENGTH tokenizer.add_special_tokens({"additional_special_tokens": SPECIAL_TOKENS}) tokenizer.chat_template = CHAT_TEMPLATE setattr(tokenizer, "image_token", "<IMG_ATOM>") # 151665 setattr(tokenizer, "image_token_id", tokenizer.convert_tokens_to_ids(tokenizer.image_token)) return tokenizer def create_image_processor(save_dir): """ Create and save an image processor for the Ovis2 model. Args: save_dir: Directory to save the image processor to Returns: The configured image processor """ image_processor = Ovis2ImageProcessor( crop_to_patches=True, size={"height": 448, "width": 448}, ) return image_processor def extract_vision_config_from_original(orig_config): """ Extract and format vision configuration from the original model config. Args: orig_config: Original model configuration Returns: dict: Cleaned vision configuration dictionary """ visual_tokenizer_config = orig_config.visual_tokenizer_config.to_dict() # backbone_config = visual_tokenizer_config.pop("backbone_config") # Copy required fields from backbone config visual_tokenizer_config["hidden_size"] = orig_config.visual_tokenizer_config.backbone_config.hidden_size visual_tokenizer_config["intermediate_size"] = ( orig_config.visual_tokenizer_config.backbone_config.intermediate_size ) visual_tokenizer_config["num_attention_heads"] = ( orig_config.visual_tokenizer_config.backbone_config.num_attention_heads ) visual_tokenizer_config["num_hidden_layers"] = ( orig_config.visual_tokenizer_config.backbone_config.num_hidden_layers ) visual_tokenizer_config["rms_norm_eps"] = orig_config.visual_tokenizer_config.backbone_config.rms_norm_eps visual_tokenizer_config["image_size"] = orig_config.visual_tokenizer_config.backbone_config.image_size visual_tokenizer_config["num_channels"] = orig_config.visual_tokenizer_config.backbone_config.num_channels visual_tokenizer_config["patch_size"] = orig_config.visual_tokenizer_config.backbone_config.patch_size visual_tokenizer_config["qkv_bias"] = orig_config.visual_tokenizer_config.backbone_config.qkv_bias # Remove unnecessary keys return {k: v for k, v in visual_tokenizer_config.items() if k not in UNNECESSARY_CONFIG_KEYS} def get_ovis2_config(model_name_or_path): """ Create an Ovis2 configuration from the original model. Args: model_name_or_path: Path to the original model Returns: Ovis2Config: Configuration for the HF implementation """ orig_config = AutoModelForCausalLM.from_pretrained( model_name_or_path, trust_remote_code=True, ).config # Extract and clean LLM config llm_config = orig_config.llm_config.to_dict() llm_config = {k: v for k, v in llm_config.items() if k not in UNNECESSARY_CONFIG_KEYS} # Extract and clean vision config visual_tokenizer_config = extract_vision_config_from_original(orig_config) return Ovis2Config( text_config=Qwen2Config(**llm_config), vision_config=Ovis2VisionConfig(**visual_tokenizer_config), hidden_size=llm_config["hidden_size"], vocab_size=llm_config["vocab_size"], initializer_range=llm_config["initializer_range"], ) def load_orig_state_dict(model_name_or_path): """ Load the state dictionary from the original model. 
Args: model_name_or_path: Path to the original model Returns: dict: Original model state dictionary """ model = AutoModelForCausalLM.from_pretrained( model_name_or_path, dtype=torch.bfloat16, trust_remote_code=True, ).eval() return model.state_dict() def convert_orig2hf(state_dict, dim): """ Convert original state dictionary keys to HF format. Args: state_dict: Original state dictionary dim: Hidden dimension for splitting QKV weights Returns: dict: Converted state dictionary for HF model """ new_state_dict = {} for key, val in state_dict.items(): orig_key = key # Apply regex pattern replacements for pattern, replacement in ORIGINAL_TO_HF_MAPPING.items(): key = re.sub(pattern, replacement, key) # Handle special cases if "attn.qkv" in key: # Split QKV into separate Q, K, V matrices new_key_query = key.replace("attn.qkv", "attention.q_proj") new_state_dict[new_key_query] = state_dict[orig_key][:dim] new_key_key = key.replace("attn.qkv", "attention.k_proj") new_state_dict[new_key_key] = state_dict[orig_key][dim : 2 * dim] new_key_value = key.replace("attn.qkv", "attention.v_proj") new_state_dict[new_key_value] = state_dict[orig_key][-dim:] elif "pos_embed" in key: new_key = key.replace("pos_embed", "position_embedding.weight") new_state_dict[new_key] = state_dict[orig_key][0] else: new_state_dict[key] = val return new_state_dict def convert_model(model_name_or_path): """ Convert and save the model in HF format. Args: model_name_or_path: Path to the original model save_dir: Directory to save the converted model Returns: The converted model """ config = get_ovis2_config(model_name_or_path) config.architectures = ["Ovis2ForConditionalGeneration"] # Load and convert weights orig_state_dict = load_orig_state_dict(model_name_or_path) new_state_dict = convert_orig2hf(orig_state_dict, config.vision_config.hidden_size) # Create model and load converted weights model = Ovis2ForConditionalGeneration(config) missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) # Report any issues with weight loading if missing_keys: print(f"Missing keys: {missing_keys}") if unexpected_keys: print(f"Unexpected keys: {unexpected_keys}") return model def main(): """Process command line arguments and execute the conversion pipeline.""" parser = argparse.ArgumentParser(description="Convert Ovis2 model to HF format") parser.add_argument( "--model_name_or_path", default="AIDC-AI/Ovis2-2B", choices=[ "AIDC-AI/Ovis2-1B", "AIDC-AI/Ovis2-2B", "AIDC-AI/Ovis2-4B", "AIDC-AI/Ovis2-8B", "AIDC-AI/Ovis2-16B", "AIDC-AI/Ovis2-34B", ], help="Location of original Ovis2 model", ) parser.add_argument("--save_dir", default="Ovis2-2B-hf", help="Location to write HF model and processors") parser.add_argument("--hub_dir", default="thisisiron/Ovis2-2B-hf", help="Hub repository name if pushing to hub") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the converted model to the Hugging Face hub" ) args = parser.parse_args() # Execute conversion pipeline print(f"Converting model from {args.model_name_or_path} to {args.save_dir}") # If already included in the transformers library, remove to avoid duplication. 
if "aimv2" in CONFIG_MAPPING_NAMES: CONFIG_MAPPING_NAMES.pop("aimv2") tokenizer = create_tokenizer( model_name_or_path=args.model_name_or_path, save_dir=args.save_dir, ) image_processor = create_image_processor( save_dir=args.save_dir, ) os.makedirs(args.save_dir, exist_ok=True) # Convert and save the model model = convert_model(model_name_or_path=args.model_name_or_path) model.save_pretrained(args.save_dir) # Save the processor processor = Ovis2Processor(tokenizer=tokenizer, image_processor=image_processor, chat_template=CHAT_TEMPLATE) processor.save_pretrained(args.save_dir) # Push to hub if requested if args.push_to_hub: processor.push_to_hub(args.hub_dir, use_temp_dir=True) model.push_to_hub(args.hub_dir, use_temp_dir=True) model = ( AutoModelForImageTextToText.from_pretrained( args.save_dir, dtype=torch.bfloat16, ) .eval() .to("cuda:0") ) processor = AutoProcessor.from_pretrained(args.save_dir) messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "Describe the image."}, ], }, ] url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg" image = Image.open(requests.get(url, stream=True).raw) messages = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) print(messages) inputs = processor( images=[image], text=messages, return_tensors="pt", ) inputs = inputs.to("cuda:0") inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16) with torch.inference_mode(): output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False) generated_ids = [output_ids[len(input_ids) :] for input_ids, output_ids in zip(inputs.input_ids, output_ids)] output_text = processor.batch_decode(generated_ids, skip_special_tokens=True) print(output_text) if __name__ == "__main__": main()
transformers/src/transformers/models/ovis2/convert_ovis2_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/ovis2/convert_ovis2_weights_to_hf.py", "repo_id": "transformers", "token_count": 5905 }
500
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OWL-ViT checkpoints from the original repository. URL: https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit""" import argparse import collections.abc import jax import jax.numpy as jnp import torch import torch.nn as nn from clip.model import CLIP from flax.training import checkpoints from huggingface_hub import Repository from transformers import ( CLIPTokenizer, OwlViTConfig, OwlViTForObjectDetection, OwlViTImageProcessor, OwlViTModel, OwlViTProcessor, ) CONFIGS = { "vit_b32": { "embed_dim": 512, "image_resolution": 768, "context_length": 16, "vocab_size": 49408, "vision_layers": 12, "vision_width": 768, "vision_patch_size": 32, "transformer_width": 512, "transformer_heads": 8, "transformer_layers": 12, }, "vit_b16": { "embed_dim": 512, "image_resolution": 768, "context_length": 16, "vocab_size": 49408, "vision_layers": 12, "vision_width": 768, "vision_patch_size": 16, "transformer_width": 512, "transformer_heads": 8, "transformer_layers": 12, }, "vit_l14": { "embed_dim": 768, "image_resolution": 840, "context_length": 16, "vocab_size": 49408, "vision_layers": 24, "vision_width": 1024, "vision_patch_size": 14, "transformer_width": 768, "transformer_heads": 12, "transformer_layers": 12, }, } def flatten_nested_dict(params, parent_key="", sep="/"): items = [] for k, v in params.items(): new_key = parent_key + sep + k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(flatten_nested_dict(v, new_key, sep=sep).items()) else: items.append((new_key, v)) return dict(items) def to_f32(params): return jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, params) def copy_attn_layer(hf_attn_layer, pt_attn_layer): q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0) q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0) out_proj_weights = pt_attn_layer.out_proj.weight out_proj_bias = pt_attn_layer.out_proj.bias hf_attn_layer.q_proj.weight.data = q_proj hf_attn_layer.q_proj.bias.data = q_proj_bias hf_attn_layer.k_proj.weight.data = k_proj hf_attn_layer.k_proj.bias.data = k_proj_bias hf_attn_layer.v_proj.weight.data = v_proj hf_attn_layer.v_proj.bias.data = v_proj_bias hf_attn_layer.out_proj.weight = out_proj_weights hf_attn_layer.out_proj.bias = out_proj_bias def copy_mlp(hf_mlp, pt_mlp): copy_linear(hf_mlp.fc1, pt_mlp.c_fc) copy_linear(hf_mlp.fc2, pt_mlp.c_proj) def copy_linear(hf_linear, pt_linear): hf_linear.weight = pt_linear.weight hf_linear.bias = pt_linear.bias def copy_layer(hf_layer, pt_layer): # copy layer norms copy_linear(hf_layer.layer_norm1, pt_layer.ln_1) copy_linear(hf_layer.layer_norm2, pt_layer.ln_2) # copy MLP copy_mlp(hf_layer.mlp, pt_layer.mlp) # copy attn copy_attn_layer(hf_layer.self_attn, pt_layer.attn) def copy_layers(hf_layers, pt_layers): for hf_layer, pt_layer in zip(hf_layers, pt_layers): copy_layer(hf_layer, pt_layer) def
copy_encoder(hf_encoder, pt_model): # copy embeds hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding # copy layer norm copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final) # copy hidden layers copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks) def copy_text_model_and_projection(hf_model, pt_model): # copy projection hf_model.text_projection.weight.data = pt_model.text_projection.data.T # copy text encoder copy_encoder(hf_model.text_model, pt_model) def copy_vision_model_and_projection(hf_model, pt_model): # copy projection hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T # copy layer norms copy_linear(hf_model.vision_model.pre_layernorm, pt_model.visual.ln_pre) copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post) # copy embeds hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data # copy encoder copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks) def copy_class_merge_token(hf_model, flax_params): flax_class_token_params = flatten_nested_dict(flax_params["backbone"]["merged_class_token"]) weight = torch.from_numpy(flax_class_token_params["scale"]) bias = torch.from_numpy(flax_class_token_params["bias"]) hf_model.layer_norm.weight = nn.Parameter(weight) hf_model.layer_norm.bias = nn.Parameter(bias) def copy_class_box_heads(hf_model, flax_params): pt_params = hf_model.state_dict() new_params = {} # Rename class prediction head flax params to pytorch HF flax_class_params = flatten_nested_dict(flax_params["class_head"]) for flax_key, v in flax_class_params.items(): torch_key = flax_key.replace("/", ".") torch_key = torch_key.replace(".kernel", ".weight") torch_key = torch_key.replace("Dense_0", "dense0") torch_key = "class_head." + torch_key if "weight" in torch_key and v.ndim == 2: v = v.T new_params[torch_key] = nn.Parameter(torch.from_numpy(v)) # Rename box prediction box flax params to pytorch HF flax_box_params = flatten_nested_dict(flax_params["obj_box_head"]) for flax_key, v in flax_box_params.items(): torch_key = flax_key.replace("/", ".") torch_key = torch_key.replace(".kernel", ".weight") torch_key = torch_key.replace("_", "").lower() torch_key = "box_head." 
+ torch_key if "weight" in torch_key and v.ndim == 2: v = v.T new_params[torch_key] = nn.Parameter(torch.from_numpy(v)) # Copy flax params to PyTorch params for name, param in new_params.items(): if name in pt_params: pt_params[name].copy_(param) def copy_flax_attn_params(hf_backbone, flax_attn_params): for k, v in flax_attn_params.items(): if k.startswith("transformer"): torch_key = k.replace("transformer.resblocks", "text_model.encoder.layers") else: torch_key = k.replace("visual.transformer.resblocks", "vision_model.encoder.layers") torch_key = torch_key.replace("attn", "self_attn") torch_key = torch_key.replace("key", "k_proj") torch_key = torch_key.replace("value", "v_proj") torch_key = torch_key.replace("query", "q_proj") torch_key = torch_key.replace("out", "out_proj") if "bias" in torch_key and v.ndim == 2: shape = v.shape[0] * v.shape[1] v = v.reshape(shape) if "weight" in torch_key and "out" in torch_key: shape = (v.shape[0] * v.shape[1], v.shape[2]) v = v.reshape(shape).T if "weight" in torch_key and "out" not in torch_key: shape = (v.shape[0], v.shape[1] * v.shape[2]) v = v.reshape(shape).T # Copy flax CLIP attn params to HF PyTorch params v = torch.from_numpy(v) hf_backbone.state_dict()[torch_key].copy_(v) def _convert_attn_layers(params): new_params = {} processed_attn_layers = [] for k, v in params.items(): if "attn." in k: base = k[: k.rindex("attn.") + 5] if base in processed_attn_layers: continue processed_attn_layers.append(base) dim = params[base + "out.weight"].shape[-1] new_params[base + "out_proj.weight"] = params[base + "out.weight"].reshape(dim, dim).T new_params[base + "out_proj.bias"] = params[base + "out.bias"] else: new_params[k] = v return new_params def convert_clip_backbone(flax_params, torch_config): torch_model = CLIP(**torch_config) torch_model.eval() torch_clip_params = torch_model.state_dict() flax_clip_params = flatten_nested_dict(flax_params["backbone"]["clip"]) new_torch_params = {} for flax_key, v in flax_clip_params.items(): torch_key = flax_key.replace("/", ".") torch_key = torch_key.replace("text.token_embedding.embedding", "token_embedding.kernel") if ( torch_key.startswith("text.transformer") or torch_key.startswith("text.text_projection") or torch_key.startswith("text.ln_final") or torch_key.startswith("text.positional_embedding") ): torch_key = torch_key[5:] torch_key = torch_key.replace("text_projection.kernel", "text_projection") torch_key = torch_key.replace("visual.proj.kernel", "visual.proj") torch_key = torch_key.replace(".scale", ".weight") torch_key = torch_key.replace(".kernel", ".weight") if "conv" in torch_key or "downsample.0.weight" in torch_key: v = v.transpose(3, 2, 0, 1) elif "weight" in torch_key and v.ndim == 2 and "embedding" not in torch_key: # Fully connected layers are transposed, embeddings are not v = v.T new_torch_params[torch_key] = v attn_params = _convert_attn_layers(new_torch_params) new_torch_params.update(attn_params) attn_params = {} # Copy flax CLIP backbone params to PyTorch params for name, param in new_torch_params.items(): if name in torch_clip_params: new_param = torch.from_numpy(param) torch_clip_params[name].copy_(new_param) else: attn_params[name] = param return torch_clip_params, torch_model, attn_params @torch.no_grad() def convert_owlvit_checkpoint(pt_backbone, flax_params, attn_params, pytorch_dump_folder_path, config_path=None): """ Copy/paste/tweak model's weights to transformers design. 
""" repo = Repository(pytorch_dump_folder_path, clone_from=f"google/{pytorch_dump_folder_path}") repo.git_pull() if config_path is not None: config = OwlViTConfig.from_pretrained(config_path) else: config = OwlViTConfig() hf_backbone = OwlViTModel(config).eval() hf_model = OwlViTForObjectDetection(config).eval() copy_text_model_and_projection(hf_backbone, pt_backbone) copy_vision_model_and_projection(hf_backbone, pt_backbone) hf_backbone.logit_scale = pt_backbone.logit_scale copy_flax_attn_params(hf_backbone, attn_params) hf_model.owlvit = hf_backbone copy_class_merge_token(hf_model, flax_params) copy_class_box_heads(hf_model, flax_params) # Save HF model hf_model.save_pretrained(repo.local_dir) # Initialize image processor image_processor = OwlViTImageProcessor( size=config.vision_config.image_size, crop_size=config.vision_config.image_size ) # Initialize tokenizer tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32", pad_token="!", model_max_length=16) # Initialize processor processor = OwlViTProcessor(image_processor=image_processor, tokenizer=tokenizer) image_processor.save_pretrained(repo.local_dir) processor.save_pretrained(repo.local_dir) repo.git_add() repo.git_commit("Upload model and processor") repo.git_push() if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--owlvit_version", default=None, type=str, required=True, help="OWL-ViT model name [clip_b16, clip_b32, clip_l14].", ) parser.add_argument( "--owlvit_checkpoint", default=None, type=str, required=True, help="Path to flax model checkpoint." ) parser.add_argument("--hf_config", default=None, type=str, required=True, help="Path to HF model config.") parser.add_argument( "--pytorch_dump_folder_path", default="hf_model", type=str, help="Path to the output PyTorch model." ) args = parser.parse_args() # Initialize PyToch clip model model_name = args.owlvit_version if model_name == "clip_b16": torch_config = CONFIGS["vit_b16"] elif model_name == "clip_b32": torch_config = CONFIGS["vit_b32"] elif model_name == "clip_l14": torch_config = CONFIGS["vit_l14"] # Load from checkpoint and convert params to float-32 variables = checkpoints.restore_checkpoint(args.owlvit_checkpoint, target=None)["optimizer"]["target"] flax_params = jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, variables) del variables # Convert CLIP backbone pt_backbone_params, clip_pt, attn_params = convert_clip_backbone(flax_params, torch_config) convert_owlvit_checkpoint(clip_pt, flax_params, attn_params, args.pytorch_dump_folder_path, args.hf_config)
transformers/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py", "repo_id": "transformers", "token_count": 6084 }
501
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PatchTST model configuration""" from typing import Optional, Union from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) class PatchTSTConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a PatchTST model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PatchTST [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_input_channels (`int`, *optional*, defaults to 1): The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of multivariate targets. context_length (`int`, *optional*, defaults to 32): The context length of the input sequence. distribution_output (`str`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". loss (`str`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". patch_length (`int`, *optional*, defaults to 1): Define the patch length of the patchification process. patch_stride (`int`, *optional*, defaults to 1): Define the stride of the patchification process. num_hidden_layers (`int`, *optional*, defaults to 3): Number of hidden layers. d_model (`int`, *optional*, defaults to 128): Dimensionality of the transformer layers. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. share_embedding (`bool`, *optional*, defaults to `True`): Sharing the input embedding across all channels. channel_attention (`bool`, *optional*, defaults to `False`): Activate the channel attention block in the Transformer to allow channels to attend to each other. ffn_dim (`int`, *optional*, defaults to 512): Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. norm_type (`str`, *optional*, defaults to `"batchnorm"`): Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the attention probabilities. positional_dropout (`float`, *optional*, defaults to 0.0): The dropout probability in the positional embedding layer. path_dropout (`float`, *optional*, defaults to 0.0): The drop path rate in the residual block (stochastic depth).
ff_dropout (`float`, *optional*, defaults to 0.0): The dropout probability used between the two layers of the feed-forward networks. bias (`bool`, *optional*, defaults to `True`): Whether to add bias in the feed-forward networks. activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (string) in the Transformer. `"gelu"` and `"relu"` are supported. pre_norm (`bool`, *optional*, defaults to `True`): Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is applied after the residual block. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. use_cls_token (`bool`, *optional*, defaults to `False`): Whether a cls token is used. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. share_projection (`bool`, *optional*, defaults to `True`): Whether to share the projection layer across different channels in the forecast head. scaling (`Union[str, bool]`, *optional*, defaults to `"std"`): Whether to scale the input targets via the "mean" scaler, the "std" scaler, or no scaler if `None`. If `True`, the scaler is set to "mean". do_mask_input (`bool`, *optional*): Whether to apply masking during pretraining. mask_type (`str`, *optional*, defaults to `"random"`): Masking type. Only `"random"` and `"forecast"` are currently supported. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio applied to mask the input data during random pretraining. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. channel_consistent_masking (`bool`, *optional*, defaults to `False`): If channel consistent masking is `True`, all the channels will have the same masking pattern. unmasked_channel_indices (`list`, *optional*): Indices of channels that are not masked during pretraining. Values in the list are numbers between 1 and `num_input_channels`. mask_value (`int`, *optional*, defaults to 0): Values in the masked patches will be filled by `mask_value`. pooling_type (`str`, *optional*, defaults to `"mean"`): Pooling of the embedding. `"mean"`, `"max"` and `None` are supported. head_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the head. prediction_length (`int`, *optional*, defaults to 24): The prediction horizon that the model will output. num_targets (`int`, *optional*, defaults to 1): Number of targets for regression and classification tasks. For classification, it is the number of classes. output_range (`list`, *optional*): Output range for the regression task. The range of output values can be set to constrain the model to produce values within that range. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples generated in parallel for probabilistic prediction.
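    The number of patches fed to the Transformer follows from `context_length`, `patch_length` and
    `patch_stride`. As an editorial illustration (assuming the usual no-padding patchification rule,
    `(max(context_length, patch_length) - patch_length) // patch_stride + 1`; this note is not part of the
    original argument list):

    ```python
    >>> # e.g. context_length=32 with overlapping patches of length 16 and stride 8
    >>> context_length, patch_length, patch_stride = 32, 16, 8
    >>> (max(context_length, patch_length) - patch_length) // patch_stride + 1
    3
    ```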
```python >>> from transformers import PatchTSTConfig, PatchTSTModel >>> # Initializing a PatchTST configuration with 12 time steps for prediction >>> configuration = PatchTSTConfig(prediction_length=12) >>> # Initializing a model (with random weights) from the configuration >>> model = PatchTSTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "patchtst" attribute_map = { "hidden_size": "d_model", "num_attention_heads": "num_attention_heads", "num_hidden_layers": "num_hidden_layers", } def __init__( self, # time series specific configuration num_input_channels: int = 1, context_length: int = 32, distribution_output: str = "student_t", loss: str = "mse", # PatchTST arguments patch_length: int = 1, patch_stride: int = 1, # Transformer architecture configuration num_hidden_layers: int = 3, d_model: int = 128, num_attention_heads: int = 4, share_embedding: bool = True, channel_attention: bool = False, ffn_dim: int = 512, norm_type: str = "batchnorm", norm_eps: float = 1e-05, attention_dropout: float = 0.0, positional_dropout: float = 0.0, path_dropout: float = 0.0, ff_dropout: float = 0.0, bias: bool = True, activation_function: str = "gelu", pre_norm: bool = True, positional_encoding_type: str = "sincos", use_cls_token: bool = False, init_std: float = 0.02, share_projection: bool = True, scaling: Optional[Union[str, bool]] = "std", # mask pretraining do_mask_input: Optional[bool] = None, mask_type: str = "random", random_mask_ratio: float = 0.5, num_forecast_mask_patches: Optional[Union[list[int], int]] = [2], channel_consistent_masking: Optional[bool] = False, unmasked_channel_indices: Optional[list[int]] = None, mask_value: int = 0, # head pooling_type: str = "mean", head_dropout: float = 0.0, prediction_length: int = 24, num_targets: int = 1, output_range: Optional[list] = None, # distribution head num_parallel_samples: int = 100, **kwargs, ): # time series specific configuration self.context_length = context_length self.num_input_channels = num_input_channels # n_vars self.loss = loss self.distribution_output = distribution_output self.num_parallel_samples = num_parallel_samples # Transformer architecture configuration self.d_model = d_model self.num_attention_heads = num_attention_heads self.ffn_dim = ffn_dim self.num_hidden_layers = num_hidden_layers self.attention_dropout = attention_dropout self.share_embedding = share_embedding self.channel_attention = channel_attention self.norm_type = norm_type self.norm_eps = norm_eps self.positional_dropout = positional_dropout self.path_dropout = path_dropout self.ff_dropout = ff_dropout self.bias = bias self.activation_function = activation_function self.pre_norm = pre_norm self.positional_encoding_type = positional_encoding_type self.use_cls_token = use_cls_token self.init_std = init_std self.scaling = scaling # PatchTST parameters self.patch_length = patch_length self.patch_stride = patch_stride # Mask pretraining self.do_mask_input = do_mask_input self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio # for random masking self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking self.channel_consistent_masking = channel_consistent_masking self.unmasked_channel_indices = unmasked_channel_indices self.mask_value = mask_value # general head params self.pooling_type = pooling_type self.head_dropout = head_dropout # For prediction head self.share_projection = share_projection self.prediction_length = prediction_length # For prediction
and regression head self.num_parallel_samples = num_parallel_samples # Regression self.num_targets = num_targets self.output_range = output_range super().__init__(**kwargs) __all__ = ["PatchTSTConfig"]
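# --- Editorial usage sketch (appended; not part of the original module) ------
# A minimal example of configuring `PatchTSTConfig` for forecast-style mask
# pretraining. The concrete values are illustrative assumptions, not
# recommended defaults.
if __name__ == "__main__":
    sketch_config = PatchTSTConfig(
        num_input_channels=7,  # a multivariate series with 7 channels
        context_length=96,
        patch_length=16,
        patch_stride=16,  # non-overlapping patches
        do_mask_input=True,  # enable masking during pretraining
        mask_type="forecast",  # mask patches at the end of each sample
        num_forecast_mask_patches=[2, 3],  # each sample masks 2 or 3 trailing patches
    )
    print(sketch_config.mask_type, sketch_config.num_forecast_mask_patches)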
transformers/src/transformers/models/patchtst/configuration_patchtst.py/0
{ "file_path": "transformers/src/transformers/models/patchtst/configuration_patchtst.py", "repo_id": "transformers", "token_count": 4674 }
502
# coding=utf-8 # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Persimmon model.""" from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import ( GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer, ) from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_persimmon import PersimmonConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Persimmon class PersimmonRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: PersimmonConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXMLP with GPTNeoX->Persimmon class PersimmonMLP(nn.Module): def __init__(self, config): super().__init__() self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size) self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size) self.act = ACT2FN[config.hidden_act] def forward(self, hidden_states): hidden_states = self.dense_h_to_4h(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dense_4h_to_h(hidden_states) return hidden_states def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class PersimmonAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.rope_theta = config.rope_theta self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor) self.is_causal = True if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True) self.qk_layernorm = config.qk_layernorm self.scaling = self.head_dim**-0.5 if self.qk_layernorm: self.q_layernorm = nn.LayerNorm( config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True ) self.k_layernorm = nn.LayerNorm( config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True ) self.attention_dropout = nn.Dropout(config.attention_dropout) self.rotary_emb = PersimmonRotaryEmbedding(config=self.config) def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Split the last dimension into (num_heads, head_dim) without making any copies; the results share the same memory storage as `fused_qkv` Args: fused_qkv (`torch.Tensor`): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim] """ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() # [batch_size, seq_length, 3 x hidden_size] fused_qkv = self.query_key_value(hidden_states) # 3 x [batch_size, seq_length, num_heads, head_dim] (query_states, key_states, value_states) = self._split_heads(fused_qkv) if self.qk_layernorm: query_states = self.q_layernorm(query_states) key_states = self.k_layernorm(key_states) # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim] query_states = query_states.transpose(1, 2) value_states = value_states.transpose(1, 2) key_states = key_states.transpose(1, 2) cos, sin = position_embeddings # Partial rotary embedding query_rot, query_pass = ( query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :], ) key_rot, key_pass = ( key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :], ) # [batch_size, num_heads, seq_length, rotary_ndims] query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) # [batch_size, num_heads, seq_length, head_dim] query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if past_key_values is not None: # Specific to RoPE models with partial rotation cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_ndims, "cache_position": cache_position, } key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface =
ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.config.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.dense(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class PersimmonDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: PersimmonConfig, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx) self.mlp = PersimmonMLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head.
""" residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + residual outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class PersimmonPreTrainedModel(PreTrainedModel): config: PersimmonConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["PersimmonDecoderLayer"] _skip_keys_device_placement = "past_key_values" _can_compile_fullgraph = True _supports_sdpa = True _supports_flash_attn = True _supports_attention_backend = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() @auto_docstring class PersimmonModel(PersimmonPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`] Args: config: PersimmonConfig """ def __init__(self, config: PersimmonConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = PersimmonRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`..." ) use_cache = False # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.final_layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, i.e. the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`int`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->PERSIMMON,Llama->Persimmon def __init__(self, config): super().__init__(config) self.model = PersimmonModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder def set_decoder(self, decoder): self.model = decoder # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, PersimmonForCausalLM >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base") >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base") >>> prompt = "human: Hey, what should I eat for dinner?" 
>>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # No upscaling to float was ever done for Persimmon slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PersimmonForSequenceClassification(GenericForSequenceClassification, PersimmonPreTrainedModel): ... class PersimmonForTokenClassification(GenericForTokenClassification, PersimmonPreTrainedModel): ... __all__ = [ "PersimmonForCausalLM", "PersimmonModel", "PersimmonPreTrainedModel", "PersimmonForSequenceClassification", "PersimmonForTokenClassification", ]
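# --- Editorial sketch (appended; not part of the original module) ------------
# Shows how Persimmon's partial rotary embedding rotates only the first
# `rotary_ndims` features of each head and passes the rest through unchanged.
# The cos/sin tensors are random stand-ins for PersimmonRotaryEmbedding output;
# all sizes are toy values chosen for the demonstration.
if __name__ == "__main__":
    batch, heads, seq, head_dim, rotary_ndims = 1, 2, 4, 8, 4
    q = torch.randn(batch, heads, seq, head_dim)
    k = torch.randn(batch, heads, seq, head_dim)
    cos = torch.randn(batch, seq, rotary_ndims)
    sin = torch.randn(batch, seq, rotary_ndims)
    q_rot, q_pass = q[..., :rotary_ndims], q[..., rotary_ndims:]
    k_rot, k_pass = k[..., :rotary_ndims], k[..., rotary_ndims:]
    q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)  # rotate only the first slice
    q_full = torch.cat((q_rot, q_pass), dim=-1)
    k_full = torch.cat((k_rot, k_pass), dim=-1)
    print(q_full.shape, k_full.shape)  # both torch.Size([1, 2, 4, 8])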
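# --- Editorial sketch (appended; not part of the original module) ------------
# Materializes the 4D causal mask built from a 2D padding mask by the static
# helper `PersimmonModel._prepare_4d_causal_attention_mask_with_cache_position`
# above. 0.0 marks attendable positions; the dtype minimum marks masked ones.
if __name__ == "__main__":
    padding_mask_2d = torch.tensor([[1, 1, 1, 0]])  # last position is padding
    causal_mask_4d = PersimmonModel._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=padding_mask_2d,
        sequence_length=4,
        target_length=4,
        dtype=torch.float32,
        cache_position=torch.arange(4),
        batch_size=1,
    )
    print(causal_mask_4d.shape)  # torch.Size([1, 1, 4, 4])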
transformers/src/transformers/models/persimmon/modeling_persimmon.py/0
{ "file_path": "transformers/src/transformers/models/persimmon/modeling_persimmon.py", "repo_id": "transformers", "token_count": 14740 }
503
# Copyright 2025 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Callable, Optional, Union import numpy as np import torch import torch.nn.functional as F from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...configuration_utils import PretrainedConfig from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling, CausalLMOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, logging from ...utils.generic import TransformersKwargs, check_model_inputs from ..phi3.configuration_phi3 import Phi3Config from ..phi3.modeling_phi3 import ( Phi3DecoderLayer, Phi3ForCausalLM, Phi3Model, Phi3PreTrainedModel, Phi3RMSNorm, Phi3RotaryEmbedding, ) from ..siglip.configuration_siglip import SiglipVisionConfig from ..siglip.modeling_siglip import ( SiglipEncoder, SiglipEncoderLayer, SiglipMLP, SiglipMultiheadAttentionPoolingHead, SiglipPreTrainedModel, SiglipVisionEmbeddings, default_flax_embed_init, lecun_normal_, ) logger = logging.get_logger(__name__) class Phi4MultimodalVisionConfig(SiglipVisionConfig): r""" This is the configuration class to store the configuration of a [`Phi4MultimodalVisionModel`]. It is used to instantiate a Phi4Multimodal vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder of [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 4304): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 27): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 448): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. 
If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. crop_size (`int`, *optional*, defaults to 448): Crop size for the input images. image_token_id (`int`, *optional*, defaults to 200010): The image token id. feature_layer (`int`, *optional*, defaults to -2): The index of the layer of the encoder from which to extract image features. Example: ```python >>> from transformers import Phi4MultimodalVisionConfig >>> # Initializing a Phi4MultimodalVisionConfig with microsoft/Phi-4-multimodal-instruct style configuration >>> configuration = Phi4MultimodalVisionConfig() ```""" model_type = "phi4_multimodal_vision" def __init__( self, hidden_size=1152, intermediate_size=4304, num_hidden_layers=27, num_attention_heads=16, num_channels=3, image_size=448, patch_size=14, hidden_act="gelu_pytorch_tanh", layer_norm_eps=1e-6, attention_dropout=0.0, crop_size: int = 448, image_token_id: int = 200010, feature_layer: int = -2, **kwargs, ): super().__init__( hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_channels=num_channels, image_size=image_size, patch_size=patch_size, hidden_act=hidden_act, layer_norm_eps=layer_norm_eps, attention_dropout=attention_dropout, **kwargs, ) self.crop_size = crop_size self.image_token_id = image_token_id self.feature_layer = feature_layer class Phi4MultimodalAudioConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Phi4MultimodalAudioModel`]. It is used to instantiate a Phi4Multimodal audio encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the audio encoder of [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers. intermediate_size (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_blocks (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. activation (`str`, *optional*, defaults to `"swish"`): The non-linear activation function in the MLPs. chunk_size (`int`, *optional*, defaults to -1): The chunk size to create the masks. left_chunk (`int`, *optional*, defaults to 18): The left chunk to create the masks. dropout_rate (`float`, *optional*, defaults to 0.0): The dropout ratio. ext_pw_out_channel (`int`, *optional*, defaults to 1024): Number of out channels in the point-wise conv modules. depthwise_seperable_out_channel (`int`, *optional*, defaults to 1024): Number of out channels in the depth-wise separable conv modules. depthwise_multiplier (`int`, *optional*, defaults to 1): Input size multiplier for the depth-wise separable conv modules. 
kernel_size (`int`, *optional*, defaults to 3): Kernel size for the depth-wise separable conv modules. conv_activation (`str`, *optional*, defaults to `"swish"`): The non-linear activation function in the conv modules. input_size (`int`, *optional*, defaults to 80): Input size for the audio model. conv_glu_type (`str`, *optional*, defaults to `"swish"`): The non-linear activation function in the point-wise conv modules. time_reduction (`int`, *optional*, defaults to 8): Time reduction (subsampling factor). bias_max_distance (`int`, *optional*, defaults to 1000): Max distance for the relative attention bias module. bias_symmetric (`bool`, *optional*, defaults to `False`): Whether the relative attention bias should be symmetric or not. nemo_activation (`str`, *optional*, defaults to `"relu"`): The non-linear activation function in the nemo conv modules. nemo_conv_channels (`int`, *optional*, defaults to 1024): Number of channels in the nemo conv modules. downsample_rate (`int`, *optional*, defaults to 1): Downsample rate for the audio feature extractor. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. audio_token_id (`int`, *optional*, defaults to 200011): The audio token id. feature_layer (`int`, *optional*, defaults to -2): The index of the layer of the encoder from which to extract audio features. Example: ```python >>> from transformers import Phi4MultimodalAudioConfig >>> # Initializing a Phi4MultimodalAudioConfig with microsoft/Phi-4-multimodal-instruct style configuration >>> configuration = Phi4MultimodalAudioConfig() ```""" model_type = "phi4_multimodal_audio" def __init__( self, hidden_size: int = 1024, intermediate_size: int = 1536, num_blocks: int = 24, num_attention_heads: int = 16, activation: str = "swish", chunk_size: int = -1, left_chunk: int = 18, dropout_rate: float = 0.0, ext_pw_out_channel: int = 1024, depthwise_seperable_out_channel: int = 1024, depthwise_multiplier: int = 1, kernel_size: int = 3, conv_activation: str = "swish", input_size: int = 80, conv_glu_type: str = "swish", time_reduction: int = 8, bias_max_distance: int = 1000, bias_symmetric: bool = False, nemo_activation: str = "relu", nemo_conv_channels: int = 1024, downsample_rate: int = 1, initializer_range: float = 0.02, audio_token_id: int = 200011, feature_layer: int = -2, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.activation = activation self.chunk_size = chunk_size self.left_chunk = left_chunk self.num_blocks = num_blocks self.dropout_rate = dropout_rate self.ext_pw_out_channel = ext_pw_out_channel self.depthwise_seperable_out_channel = depthwise_seperable_out_channel self.depthwise_multiplier = depthwise_multiplier self.kernel_size = kernel_size self.conv_activation = conv_activation self.input_size = input_size self.conv_glu_type = conv_glu_type self.time_reduction = time_reduction self.bias_max_distance = bias_max_distance self.bias_symmetric = bias_symmetric self.nemo_activation = nemo_activation self.nemo_conv_channels = nemo_conv_channels self.downsample_rate = downsample_rate self.audio_token_id = audio_token_id self.initializer_range = initializer_range self.feature_layer = feature_layer if time_reduction % 2 != 0: raise ValueError("`time_reduction` should be a multiple of 2!") length = input_size for _ in range(int(math.log(time_reduction, 2))): length = 
math.floor((length - 1) / 2 + 1) self.nemo_final_size = length class Phi4MultimodalConfig(Phi3Config): r""" This is the configuration class to store the configuration of a [`Phi4MultimodalModel`]. It is used to instantiate a Phi4Multimodal model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 200064): Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Phi3Model`]. hidden_size (`int`, *optional*, defaults to 3072): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 8192): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`. resid_pdrop (`float`, *optional*, defaults to 0.0): Dropout probability for mlp outputs. embd_pdrop (`float`, *optional*, defaults to 0.0): The dropout ratio for the embeddings. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio after computing the attention scores. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon value used for the RMSNorm. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings or not. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`dict`, *optional*): The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must contain the following keys: `type`, `short_factor` and `long_factor`.
The `type` must be `longrope` and the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size divided by the number of attention heads divided by 2. partial_rotary_factor (`float`, *optional*, defaults to `1.0`): Percentage of the query and keys which will have rotary embedding. Must be between 0.0 and 1.0. bos_token_id (`int`, *optional*, defaults to 199999): The id of the "beginning-of-sequence" token. eos_token_id (`int` or `list[int]`, *optional*, defaults to `[199999, 200020]`): The id of the "end-of-sequence" token. pad_token_id (`int`, *optional*, defaults to 199999): The id of the padding token. original_max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model was trained with. This is used to determine the size of the original RoPE embeddings when using long scaling. sliding_window (`int`, *optional*): Sliding window attention window size. If `None`, no sliding window is applied. vision_config (`Phi4MultimodalVisionConfig` or `dict`, *optional*): The vision config for the underlying image embedding model. If not provided, will default to the configuration used to instantiate a model similar in architecture as [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct). audio_config (`Phi4MultimodalAudioConfig` or `dict`, *optional*): The audio config for the underlying audio embedding model. If not provided, will default to the configuration used to instantiate a model similar in architecture as [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct). Example: ```python >>> from transformers import Phi4MultimodalModel, Phi4MultimodalConfig >>> # Initializing a Phi4Multimodal style configuration >>> configuration = Phi4MultimodalConfig.from_pretrained("microsoft/Phi-4-multimodal-instruct") >>> # Initializing a model from the configuration >>> model = Phi4MultimodalModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" sub_configs = {"audio_config": Phi4MultimodalAudioConfig, "vision_config": Phi4MultimodalVisionConfig} def __init__( self, vocab_size=200064, hidden_size=3072, intermediate_size=8192, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, resid_pdrop=0.0, embd_pdrop=0.0, attention_dropout=0.0, hidden_act="silu", max_position_embeddings=131072, initializer_range=0.02, rms_norm_eps=1e-5, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=1, bos_token_id=199999, eos_token_id=[199999, 200020], pad_token_id=199999, original_max_position_embeddings=4096, sliding_window=None, vision_config=None, audio_config=None, **kwargs, ): super().__init__( vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attention_dropout=attention_dropout, hidden_act=hidden_act, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, rms_norm_eps=rms_norm_eps, use_cache=use_cache, tie_word_embeddings=tie_word_embeddings, rope_theta=rope_theta, rope_scaling=rope_scaling, partial_rotary_factor=partial_rotary_factor, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, original_max_position_embeddings=original_max_position_embeddings, sliding_window=sliding_window, 
**kwargs, ) if isinstance(vision_config, dict): vision_config = Phi4MultimodalVisionConfig(**vision_config) elif vision_config is None: vision_config = Phi4MultimodalVisionConfig() self.vision_config = vision_config if isinstance(audio_config, dict): audio_config = Phi4MultimodalAudioConfig(**audio_config) elif audio_config is None: audio_config = Phi4MultimodalAudioConfig() self.audio_config = audio_config class Phi4MultimodalVisionMLP(SiglipMLP): pass def simple_eager_attention_forward( module: nn.Module, query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class Phi4MultimodalVisionAttention(nn.Module): def __init__(self, config: Phi4MultimodalVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.scaling = self.head_dim**-0.5 self.is_causal = True self.attention_dropout = config.attention_dropout self.k_proj = nn.Linear(config.hidden_size, config.hidden_size) self.v_proj = nn.Linear(config.hidden_size, config.hidden_size) self.q_proj = nn.Linear(config.hidden_size, config.hidden_size) self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = simple_eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class Phi4MultimodalVisionEncoderLayer(SiglipEncoderLayer): def __init__(self, config: Phi4MultimodalVisionConfig): super().__init__(config) self.self_attn = Phi4MultimodalVisionAttention(config) self.mlp = Phi4MultimodalVisionMLP(config) class Phi4MultimodalVisionEncoder(SiglipEncoder): def __init__(self, config: Phi4MultimodalVisionConfig): super().__init__(config) self.layers = nn.ModuleList( [Phi4MultimodalVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)] ) class Phi4MultimodalVisionPreTrainedModel(SiglipPreTrainedModel): config: Phi4MultimodalVisionConfig base_model_prefix = "phi4_vision"
supports_gradient_checkpointing = True _no_split_modules = ["Phi4MultimodalVisionEncoderLayer"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Phi4MultimodalVisionEmbeddings): width = ( self.config.hidden_size if isinstance(self.config, Phi4MultimodalVisionConfig) else self.config.hidden_size ) nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width)) elif isinstance(module, nn.Embedding): default_flax_embed_init(module.weight) elif isinstance(module, Phi4MultimodalVisionAttention): nn.init.normal_(module.q_proj.weight) nn.init.normal_(module.k_proj.weight) nn.init.normal_(module.v_proj.weight) nn.init.normal_(module.out_proj.weight) nn.init.zeros_(module.q_proj.bias) nn.init.zeros_(module.k_proj.bias) nn.init.zeros_(module.v_proj.bias) nn.init.zeros_(module.out_proj.bias) elif isinstance(module, Phi4MultimodalVisionMLP): nn.init.normal_(module.fc1.weight) nn.init.normal_(module.fc2.weight) nn.init.normal_(module.fc1.bias, std=1e-6) nn.init.normal_(module.fc2.bias, std=1e-6) elif isinstance(module, Phi4MultimodalVisionMultiheadAttentionPoolingHead): nn.init.normal_(module.probe.data) nn.init.normal_(module.attention.in_proj_weight.data) nn.init.zeros_(module.attention.in_proj_bias.data) elif isinstance(module, (nn.Linear, nn.Conv2d)): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class Phi4MultimodalVisionEmbeddings(SiglipVisionEmbeddings, nn.Module): def __init__(self, config: Phi4MultimodalVisionConfig): nn.Module.__init__(self) self.config = config self.patch_size = config.patch_size self.num_patches_per_side = config.image_size // self.patch_size self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=config.hidden_size, kernel_size=self.patch_size, stride=self.patch_size, padding="valid", ) self.position_embedding = nn.Embedding(self.num_patches_per_side**2, config.hidden_size) def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor: batch_size = pixel_values.size(0) patch_embeds = self.patch_embedding(pixel_values) embeddings = patch_embeds.flatten(2).transpose(1, 2) max_im_h, max_im_w = pixel_values.size(2), pixel_values.size(3) max_nb_patches_h, max_nb_patches_w = max_im_h // self.patch_size, max_im_w // self.patch_size boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side) position_ids = torch.full((batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0) for batch_idx, p_attn_mask in enumerate(patch_attention_mask): nb_patches_h = p_attn_mask[:, 0].sum() nb_patches_w = p_attn_mask[0].sum() fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h) fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w) bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True) bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True) pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten() position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids position_ids = position_ids.to(self.position_embedding.weight.device) embeddings = embeddings + self.position_embedding(position_ids) return embeddings class Phi4MultimodalVisionMultiheadAttentionPoolingHead(SiglipMultiheadAttentionPoolingHead): def __init__(self, config: 
Phi4MultimodalVisionConfig): super().__init__(config) self.mlp = Phi4MultimodalVisionMLP(config) def forward(self, hidden_state, attention_mask): batch_size = hidden_state.shape[0] probe = self.probe.repeat(batch_size, 1, 1) hidden_state = self.attention( query=probe, key=hidden_state, value=hidden_state, key_padding_mask=~attention_mask )[0] residual = hidden_state hidden_state = self.layernorm(hidden_state) hidden_state = residual + self.mlp(hidden_state) return hidden_state[:, 0] class Phi4MultimodalVisionModel(Phi4MultimodalVisionPreTrainedModel): config: Phi4MultimodalVisionConfig main_input_name = "pixel_values" def __init__(self, config: Phi4MultimodalVisionConfig): super().__init__(config) self.config = config self.embeddings = Phi4MultimodalVisionEmbeddings(config) self.encoder = Phi4MultimodalVisionEncoder(config) self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.head = Phi4MultimodalVisionMultiheadAttentionPoolingHead(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.embeddings.patch_embedding def forward( self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> BaseModelOutputWithPooling: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) batch_size = pixel_values.size(0) if patch_attention_mask is None: patch_attention_mask = torch.ones( size=( batch_size, pixel_values.size(2) // self.config.patch_size, pixel_values.size(3) // self.config.patch_size, ), dtype=torch.bool, device=pixel_values.device, ) hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask) patch_attention_mask = patch_attention_mask.view(batch_size, -1) # The call to `_upad_input` in `_flash_attention_forward` is expensive # So when the `patch_attention_mask` is full of 1s (i.e. 
attending to the whole sequence), # avoiding passing the attention_mask, which is equivalent to attending to the full sequence if not torch.any(~patch_attention_mask): attention_mask = None else: attention_mask = ( _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype) if self.config._attn_implementation != "flash_attention_2" else patch_attention_mask ) encoder_outputs: BaseModelOutput = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) last_hidden_state = encoder_outputs.last_hidden_state last_hidden_state = self.post_layernorm(last_hidden_state) pooled_output = self.head( hidden_state=last_hidden_state, attention_mask=patch_attention_mask, ) return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class Phi4MultimodalImageEmbedding(nn.Module): """Image embedding.""" def __init__(self, config: Phi4MultimodalConfig): super().__init__() self.config = config self.layer_idx = config.vision_config.feature_layer self.crop_size = config.vision_config.crop_size self.image_dim_out = config.vision_config.hidden_size n_patches = config.vision_config.image_size // config.vision_config.patch_size if n_patches % 2 != 0: self.img_processor_padding = nn.ReflectionPad2d((0, 1, 0, 1)) n_patches += 1 self.num_img_tokens = (n_patches // 2) ** 2 self.drop = nn.Dropout(config.embd_pdrop) self.img_processor = Phi4MultimodalVisionModel._from_config(config.vision_config) self.image_token_compression = nn.AvgPool2d(kernel_size=2, stride=2) self.img_projection_up = nn.Linear(self.image_dim_out, config.hidden_size) self.img_projection_down = nn.Linear(config.hidden_size, config.hidden_size) self.global_img_feature_extensor = nn.Parameter(torch.zeros([1, 1, self.image_dim_out])) self.sub_img_feature_extensor = nn.Parameter(torch.zeros([1, 1, 1, self.image_dim_out])) def get_img_features(self, img_embeds: torch.FloatTensor, attention_mask=None) -> torch.FloatTensor: img_processor_output = self.img_processor( img_embeds, patch_attention_mask=attention_mask, output_hidden_states=True ) img_feature = img_processor_output.hidden_states[self.layer_idx] patch_feature = img_feature # reshape to 2D tensor width = int(math.sqrt(patch_feature.size(1))) patch_feature = patch_feature.view(-1, width, width, patch_feature.size(-1)) # convert to NCHW patch_feature = patch_feature.permute(0, 3, 1, 2) if getattr(self, "img_processor_padding", None) is not None: patch_feature = self.img_processor_padding(patch_feature) patch_feature = self.image_token_compression(patch_feature) # convert to NHWC patch_feature = patch_feature.permute(0, 2, 3, 1) patch_feature = patch_feature.view(-1, patch_feature.size(1) * patch_feature.size(2), patch_feature.size(-1)) return patch_feature def forward( self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_pixel_values: torch.FloatTensor, image_sizes: Optional[torch.Tensor] = None, image_attention_mask: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: image_pixel_values = image_pixel_values.to(self.img_processor.embeddings.patch_embedding.weight.dtype) target_device = self.img_projection_up.bias.device target_dtype = self.img_projection_up.bias.dtype batch_size = image_pixel_values.shape[0] img_features = self.get_img_features( image_pixel_values.flatten(0, 1), attention_mask=image_attention_mask.flatten(0, 1).to(dtype=bool, 
device=target_device), ) base_feat_size = int(np.sqrt(img_features.shape[1])) img_features = img_features.view(batch_size, -1, base_feat_size**2, self.image_dim_out) image_sizes = image_sizes.view(-1, 2) output_imgs = [] for idx in range(batch_size): height, width = image_sizes[idx] height_ratio = height // self.crop_size width_ratio = width // self.crop_size area_ratio = height_ratio * width_ratio global_img = img_features[idx, :1] global_img = global_img.reshape(1, base_feat_size, base_feat_size, self.image_dim_out).contiguous() temporary_extensor = self.sub_img_feature_extensor.repeat(1, base_feat_size, 1, 1) global_img = torch.cat([global_img, temporary_extensor], dim=2).reshape(1, -1, self.image_dim_out) sub_img = img_features[idx, 1:] sub_img = sub_img[:area_ratio] sub_img = ( sub_img.reshape(height_ratio, width_ratio, base_feat_size, base_feat_size, self.image_dim_out) .transpose(1, 2) .reshape(1, height_ratio * base_feat_size, width_ratio * base_feat_size, self.image_dim_out) .contiguous() ) if image_attention_mask is not None: reshaped_image_attention_mask = ( image_attention_mask[idx, 1 : area_ratio + 1, 0::2, 0::2] .reshape(height_ratio, width_ratio, base_feat_size, base_feat_size) .transpose(1, 2) .reshape(1, height_ratio * base_feat_size, width_ratio * base_feat_size) ) useful_height = int(reshaped_image_attention_mask[0, :, 0].sum().item()) useful_width = int(reshaped_image_attention_mask[0, 0, :].sum().item()) sub_img = sub_img[:, :useful_height, :useful_width] temporary_extensor = self.sub_img_feature_extensor.repeat(1, useful_height, 1, 1) else: temporary_extensor = self.sub_img_feature_extensor.repeat(1, height_ratio * base_feat_size, 1, 1) sub_img = torch.cat([sub_img, temporary_extensor], dim=2).reshape(1, -1, self.image_dim_out) # Merge global and sub output_imgs.append(torch.cat([sub_img, self.global_img_feature_extensor, global_img], dim=1)) img_set_tensor = [] for output_img in output_imgs: output_img = output_img.to(device=target_device, dtype=target_dtype) img_feature_proj = self.img_projection_up(output_img) img_feature_proj = nn.functional.gelu(img_feature_proj) img_feature_proj = self.img_projection_down(img_feature_proj) img_set_tensor.append(img_feature_proj) merged_img_set_tensor = torch.cat(img_set_tensor, dim=1).squeeze(0) merged_img_set_tensor = merged_img_set_tensor.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device) with torch.no_grad(): positions_tuple = torch.nonzero(input_ids == self.config.vision_config.image_token_id, as_tuple=True) # Temporarily disable autocast to avoid issue on bf16 tensors # Ref: https://github.com/pytorch/pytorch/issues/132715 with torch.autocast(device_type=inputs_embeds.device.type, enabled=False): image_embeds = inputs_embeds.index_put( indices=positions_tuple, values=merged_img_set_tensor, accumulate=False ) image_embeds = self.drop(image_embeds) return image_embeds ########################################################## AUDIO ############################################# class Phi4MultimodalAudioMLP(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.layer_norm = nn.LayerNorm(config.hidden_size) self.act_fn = ACT2FN[config.activation] self.gate_up_proj = nn.Linear(config.hidden_size, config.intermediate_size * 2) self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): hidden_states = self.layer_norm(hidden_states) up_states = self.gate_up_proj(hidden_states) up_states, gate = 
up_states.chunk(2, dim=-1) up_states = up_states * self.act_fn(gate) up_states = self.dropout(up_states) hidden_states = self.down_proj(up_states) out = self.dropout(hidden_states) return out class Phi4MultimodalAudioAttention(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.config = config self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.scaling = self.head_dim**-0.5 self.attention_dropout = config.dropout_rate self.is_causal = True self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs, ): input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = simple_eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, _ = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output class Phi4MultimodalAudioDepthWiseSeperableConv1d(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig, padding: int = 0): super().__init__() self.dw_conv = nn.Conv1d( config.hidden_size, config.hidden_size * config.depthwise_multiplier, config.kernel_size, 1, padding=padding, groups=config.hidden_size, ) self.pw_conv = nn.Conv1d( config.hidden_size * config.depthwise_multiplier, config.depthwise_seperable_out_channel, 1, 1, 0 ) def forward(self, hidden_states): return self.pw_conv(self.dw_conv(hidden_states)) class Phi4MultimodalAudioGluPointWiseConv(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.config = config self.output_dim = config.ext_pw_out_channel self.ext_pw_conv_1d = nn.Conv1d(config.hidden_size, config.ext_pw_out_channel * 2, kernel_size=1, stride=1) self.glu_act = ACT2FN[config.conv_glu_type] self.b1 = nn.Parameter(torch.zeros(1, config.ext_pw_out_channel, 1)) self.b2 = nn.Parameter(torch.zeros(1, config.ext_pw_out_channel, 1)) def forward(self, hidden_states): # we assume the input always has the #channel (#dim) in the last dimension of the # tensor, so need to switch the dimension first for 1D-Conv case hidden_states = hidden_states.permute([0, 2, 1]) hidden_states = self.ext_pw_conv_1d(hidden_states) out = hidden_states[:, 0 : self.output_dim, :] + self.b1 out = out * self.glu_act(hidden_states[:, self.output_dim : self.output_dim * 2, :] + self.b2) return out.permute([0, 2, 1]) class Phi4MultimodalAudioConvModule(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.config = config self.kernel_size = config.kernel_size self.layer_norm = nn.LayerNorm(config.hidden_size) 
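        # The submodules below implement the Conformer convolution block: LayerNorm -> GLU
        # pointwise conv -> depthwise separable conv (trimmed back to the original length in
        # `forward`) -> activation -> final pointwise conv -> dropout.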
self.glu = Phi4MultimodalAudioGluPointWiseConv(config) self.dw_sep_conv_1d = Phi4MultimodalAudioDepthWiseSeperableConv1d(config, padding=config.kernel_size - 1) self.act = ACT2FN[config.conv_activation] self.ext_pw_conv_1d = nn.Conv1d(config.hidden_size, config.ext_pw_out_channel, kernel_size=1, stride=1) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states: torch.Tensor): hidden_states = self.glu(self.layer_norm(hidden_states)) hidden_states = self.dw_sep_conv_1d(hidden_states.permute([0, 2, 1])) if self.kernel_size > 1: hidden_states = hidden_states[:, :, : -(self.kernel_size - 1)] hidden_states = self.act(hidden_states) hidden_states = self.ext_pw_conv_1d(hidden_states) out = self.dropout(hidden_states.permute([0, 2, 1])) return out class Phi4MultimodalAudioConformerEncoderLayer(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.feed_forward_in = Phi4MultimodalAudioMLP(config) self.self_attn = Phi4MultimodalAudioAttention(config) self.conv = Phi4MultimodalAudioConvModule(config) self.feed_forward_out = Phi4MultimodalAudioMLP(config) self.layer_norm_att = nn.LayerNorm(config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, ): residual = hidden_states + 0.5 * self.feed_forward_in(hidden_states) hidden_states = self.layer_norm_att(residual) hidden_states = residual + self.self_attn(hidden_states, attention_mask) hidden_states = hidden_states + self.conv(hidden_states) hidden_states = hidden_states + 0.5 * self.feed_forward_out(hidden_states) out = self.layer_norm(hidden_states) return out class Phi4MultimodalAudioNemoConvSubsampling(torch.nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.subsampling_factor = config.time_reduction self.sampling_num = int(math.log(self.subsampling_factor, 2)) self.act_fn = ACT2FN[config.nemo_activation] conv_channels = config.nemo_conv_channels layers = [ nn.Conv2d(1, conv_channels, kernel_size=3, stride=2, padding=1), self.act_fn, ] for _ in range(self.sampling_num - 1): layers.extend( [ nn.Conv2d(conv_channels, conv_channels, kernel_size=3, stride=2, padding=1, groups=conv_channels), nn.Conv2d(conv_channels, conv_channels, kernel_size=1, stride=1, padding=0, groups=1), self.act_fn, ] ) # Aggregate the layers self.conv = torch.nn.Sequential(*layers) self.out = torch.nn.Linear(conv_channels * config.nemo_final_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor, mask: Optional[torch.Tensor]): # Unsqueeze Channel Axis hidden_states = hidden_states.unsqueeze(1) hidden_states = self.conv(hidden_states) # Flatten Channel and Frequency Axes b, _, t, _ = hidden_states.size() hidden_states = self.out(hidden_states.transpose(1, 2).reshape(b, t, -1)) if mask is None: return hidden_states, None max_audio_length = hidden_states.shape[1] feature_lens = mask.sum(1) padding_length = torch.ceil(feature_lens / self.subsampling_factor) arange_ = torch.arange(0, max_audio_length, device=hidden_states.device) pad_mask = arange_.expand(padding_length.size(0), -1) < padding_length.unsqueeze(1) return hidden_states, pad_mask.unsqueeze(1) class Phi4MultimodalAudioRelativeAttentionBias(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.max_distance = config.bias_max_distance self.symmetric = config.bias_symmetric self.num_buckets = self.max_distance if not config.bias_symmetric: self.num_buckets *= 2 self.bias_values = 
nn.Embedding(self.num_buckets, config.num_attention_heads) def forward(self, x): # instantiate bias compatible with shape of x max_pos = x.size(1) context_position = torch.arange(max_pos, device=x.device, dtype=torch.long)[:, None] memory_position = torch.arange(max_pos, device=x.device, dtype=torch.long)[None, :] relative_position = memory_position - context_position # clipping to a maximum distance using ops that play well with ONNX export relative_position = relative_position.masked_fill(relative_position < -self.max_distance, -self.max_distance) relative_position = relative_position.masked_fill( relative_position > self.max_distance - 1, self.max_distance - 1 ) # mapping from relative position to index in the bias parameter bias_idx = relative_position bias_idx = bias_idx.abs() if self.symmetric else bias_idx + self.num_buckets // 2 att_bias = self.bias_values(bias_idx) att_bias = att_bias.permute(2, 0, 1).unsqueeze(0) return att_bias class Phi4MultimodalAudioMeanVarianceNormLayer(nn.Module): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__() self.register_buffer("global_mean", torch.zeros(config.input_size)) self.register_buffer("global_invstd", torch.ones(config.input_size)) def forward(self, x): return (x - self.global_mean) * self.global_invstd @auto_docstring class Phi4MultimodalAudioPreTrainedModel(PreTrainedModel): config: Phi4MultimodalAudioConfig supports_gradient_checkpointing = True _no_split_modules = ["Phi4MultimodalAudioConformerEncoderLayer"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Phi4MultimodalAudioGluPointWiseConv): module.b1.data.zero_() module.b2.data.zero_() class Phi4MultimodalAudioModel(Phi4MultimodalAudioPreTrainedModel): def __init__(self, config: Phi4MultimodalAudioConfig): super().__init__(config) self.config = config self.encoder_embedding = Phi4MultimodalAudioMeanVarianceNormLayer(config) self.embed = Phi4MultimodalAudioNemoConvSubsampling(config) self.relative_attention_bias_layer = Phi4MultimodalAudioRelativeAttentionBias(config) self.encoders = nn.ModuleList( [Phi4MultimodalAudioConformerEncoderLayer(config) for _ in range(config.num_blocks)] ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def _streaming_mask(self, seq_len, batch_size, chunk_size, left_chunk): # Create mask matrix for streaming # S stores start index. if chunksize is 18, s is [0,18,36,....] chunk_start_idx = np.arange(0, seq_len, chunk_size) # avoid randomness when run evaluation or decoding if self.training and np.random.rand() > 0.5: # Either first or last chunk is not complete. # If only the last one is not complete, EOS is not effective chunk_start_idx = seq_len - chunk_start_idx chunk_start_idx = chunk_start_idx[::-1] chunk_start_idx = chunk_start_idx[:-1] chunk_start_idx = np.insert(chunk_start_idx, 0, 0) enc_streaming_mask = ( adaptive_enc_mask(seq_len, chunk_start_idx, left_window=left_chunk) .unsqueeze(0) .expand([batch_size, -1, -1]) ) return enc_streaming_mask def forward_embeddings(self, hidden_states, masks): """Forwarding the inputs through the top embedding layers""" seq_len = math.ceil(hidden_states.shape[1] / self.config.time_reduction) if seq_len <= 0: raise ValueError( f"The sequence length after time reduction is invalid: {seq_len}. Your input feature is too short." 
            )

        batch_size = hidden_states.shape[0]
        enc_streaming_mask = self._streaming_mask(seq_len, batch_size, self.config.chunk_size, self.config.left_chunk)
        enc_streaming_mask = enc_streaming_mask.to(hidden_states.device)
        hidden_states, masks = self.embed(hidden_states, masks)

        streaming_mask = enc_streaming_mask
        if streaming_mask is not None and masks is not None:
            hs_mask = masks & streaming_mask
        elif masks is not None:
            hs_mask = masks
        else:
            hs_mask = streaming_mask

        return hidden_states, hs_mask, masks

    def calculate_hs_mask(self, hidden_states, device, mask):
        max_audio_length = hidden_states.shape[1]
        batch_size = hidden_states.shape[0]
        enc_streaming_mask = self._streaming_mask(
            max_audio_length, batch_size, self.config.chunk_size, self.config.left_chunk
        )
        enc_streaming_mask = enc_streaming_mask.to(device)
        if mask is None:
            return enc_streaming_mask

        feature_lens = mask.sum(1)
        padding_length = feature_lens
        pad_mask = torch.arange(0, max_audio_length, device=device).expand(
            padding_length.size(0), -1
        ) < padding_length.unsqueeze(1)
        pad_mask = pad_mask.unsqueeze(1)
        pad_mask = pad_mask & enc_streaming_mask
        return pad_mask

    def forward(self, hidden_states: torch.Tensor, mask: Optional[torch.Tensor]):
        hidden_states = self.encoder_embedding(hidden_states)
        hidden_states, hs_mask, mask = self.forward_embeddings(hidden_states, mask)

        unfolded = False
        bs, seq_len, _ = hidden_states.shape
        max_seq_len = 500  # maximum position for absolute positional encoding
        if seq_len > max_seq_len:
            # audio sequence is longer than max_seq_len, unfold it into chunks of max_seq_len
            unfolded = True
            # the unfold op will drop residual frames, pad it to the multiple of max_seq_len
            if seq_len % max_seq_len > 0:
                chunk_pad_size = max_seq_len - (seq_len % max_seq_len)
            else:
                chunk_pad_size = 0
            if chunk_pad_size > 0:
                hidden_states_pad = F.pad(hidden_states, (0, 0, 0, chunk_pad_size), "constant", 0)
                hidden_states = hidden_states_pad.to(hidden_states.device)

            hidden_states = unfold_tensor(hidden_states, max_seq_len)
            masks_unfold = None
            if mask is not None:
                # revise hs_mask here because the previously calculated hs_mask did not consider extra pad
                subsampled_pad_mask = mask.squeeze(1)  # [bz, subsampled_unmask_seq_len]
                extra_padded_subsampled_pad_mask = F.pad(
                    subsampled_pad_mask, (0, chunk_pad_size), "constant", False
                )  # extra padding to the pad mask
                extra_padded_subsampled_pad_mask = extra_padded_subsampled_pad_mask.unsqueeze(-1).float()
                masks_unfold = unfold_tensor(
                    extra_padded_subsampled_pad_mask, max_seq_len
                )  # unfold the pad mask like we did to the input tensor
                masks_unfold = masks_unfold.squeeze(-1).bool()  # unfold op does not support bool tensor
            hs_mask = self.calculate_hs_mask(
                hidden_states, hidden_states.device, masks_unfold
            )  # calculate hs_mask based on the unfolded pad mask

        relative_attention_bias = self.relative_attention_bias_layer(hidden_states)
        attention_mask = hs_mask.unsqueeze(1) + relative_attention_bias

        for layer in self.encoders:
            hidden_states = layer(hidden_states, attention_mask)

        if unfolded:
            embed_dim = hidden_states.shape[-1]
            hidden_states = hidden_states.reshape(bs, -1, embed_dim)
            # if we ever padded before unfolding, we need to remove the padding
            if chunk_pad_size > 0:
                hidden_states = hidden_states[:, :-chunk_pad_size, :]

        return hidden_states


def unfold_tensor(tensor, max_seq_len):
    """
    For a given tensor with shape of (N, T, D), if the sequence length T is longer than max_seq_len,
    this function unfolds it into a (NT', max_seq_len, D) tensor, where T' is T // max_seq_len.
    Args:
        tensor: N, T, D
    """
    _, _, D = tensor.shape
    tensor = tensor.transpose(-1, -2)
    # N x D x 1 x T => N x (D x max_seq_len) x T'
    tensor = F.unfold(tensor[..., None, :], kernel_size=(1, max_seq_len), stride=(1, max_seq_len))
    new_bsz, _, slen = tensor.shape
    tensor = tensor.view(new_bsz, -1, max_seq_len, slen)
    tensor = tensor.permute(0, 3, 2, 1)
    tensor = tensor.view(-1, max_seq_len, D).contiguous()
    return tensor


def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):
    """
    Build the chunk-wise attention mask used by Transformer-Transducer streaming mode.

    Args:
        x_len (int): sequence length
        chunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. It also supports
            adaptive chunk size [0,10,15,45]
        left_window (int): how many left chunks can be seen
        right_window (int): how many right chunks can be seen. It is used for chunk overlap model.
    Returns:
        mask (torch.Tensor): a mask tensor for streaming model

    Example:
        adaptive_enc_mask(10, [0, 4, 8], left_window=1) lets position 5 (inside the chunk covering
        [4, 8)) attend to positions 0..7, i.e. its own chunk plus one chunk to the left.
    """
    chunk_start_idx = torch.Tensor(chunk_start_idx).long()
    # append 0 to the beginning, so it becomes [0, 0, 18, 36, 48]
    start_pad = torch.nn.functional.pad(chunk_start_idx, (1, 0))
    # append x_len to the end, so it becomes [0, 18, 36, 48, x_len]
    end_pad = torch.nn.functional.pad(chunk_start_idx, (0, 1), value=x_len)
    seq_range = torch.arange(0, x_len).unsqueeze(-1)
    idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[:, 1]
    seq_range_expand = torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)
    idx_left = idx - left_window
    idx_left[idx_left < 0] = 0
    boundary_left = start_pad[idx_left]
    mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)
    idx_right = idx + right_window
    idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)
    boundary_right = end_pad[idx_right]
    mask_right = seq_range_expand < boundary_right.unsqueeze(-1)
    return mask_left & mask_right


class Phi4MultimodalAudioEmbedding(nn.Module):
    def __init__(self, config: Phi4MultimodalConfig):
        super().__init__()
        self.config = config
        self.layer_idx = config.audio_config.feature_layer

        self.drop = nn.Dropout(config.embd_pdrop)
        self.encoder = Phi4MultimodalAudioModel._from_config(config.audio_config)

        self.up_proj_for_speech = nn.Linear(
            config.audio_config.hidden_size * config.audio_config.downsample_rate, config.hidden_size
        )
        self.down_proj_for_speech = nn.Linear(config.hidden_size, config.hidden_size)
        self.up_proj_for_vision_speech = nn.Linear(
            config.audio_config.hidden_size * config.audio_config.downsample_rate, config.hidden_size
        )
        self.down_proj_for_vision_speech = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(
        self,
        input_ids: torch.LongTensor,
        inputs_embeds: torch.Tensor,
        audio_input_features: torch.FloatTensor,
        audio_embed_sizes=None,
        audio_attention_mask=None,
        audio_projection_mode="speech",
    ) -> torch.FloatTensor:
        with torch.no_grad():
            positions_tuple = torch.nonzero(input_ids == self.config.audio_config.audio_token_id, as_tuple=True)

        up_proj = self.up_proj_for_speech if audio_projection_mode == "speech" else self.up_proj_for_vision_speech
        down_proj = (
            self.down_proj_for_speech if audio_projection_mode == "speech" else self.down_proj_for_vision_speech
        )

        target_device = up_proj.bias.device
        target_dtype = up_proj.bias.dtype

        audio_input_features = audio_input_features.to(device=target_device, dtype=target_dtype)
        audio_encoder_hidden_states = self.encoder(audio_input_features, audio_attention_mask)
        audio_encoder_hidden_states = up_proj(audio_encoder_hidden_states)
        audio_encoder_hidden_states = nn.functional.gelu(audio_encoder_hidden_states)
        audio_embeds =
down_proj(audio_encoder_hidden_states) merged_audio_embeds = torch.cat( [audio_embeds[i, : audio_embed_sizes[i], :] for i in range(len(audio_embed_sizes))], dim=0 ) merged_audio_embeds = merged_audio_embeds.to(dtype=inputs_embeds.dtype, device=inputs_embeds.device) # Temporarily disable autocast to avoid issue on bf16 tensors # Ref: https://github.com/pytorch/pytorch/issues/132715 with torch.autocast(device_type=inputs_embeds.device.type, enabled=False): audio_embeds = inputs_embeds.index_put( indices=positions_tuple, values=merged_audio_embeds, accumulate=False ) audio_embeds = self.drop(audio_embeds) return audio_embeds #################################################### TEXT #################################################### class Phi4MultimodalRMSNorm(Phi3RMSNorm): pass class Phi4MultimodalDecoderLayer(Phi3DecoderLayer): pass class Phi4MultimodalFeatureEmbedding(nn.Module): """Image-audio embedding.""" def __init__(self, config: Phi4MultimodalConfig) -> None: super().__init__() self.config = config self.image_token_id = config.vision_config.image_token_id self.audio_token_id = config.audio_config.audio_token_id self.image_embed = Phi4MultimodalImageEmbedding(config) self.audio_embed = Phi4MultimodalAudioEmbedding(config) def forward( self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_pixel_values: Optional[torch.FloatTensor] = None, audio_input_features: Optional[torch.FloatTensor] = None, image_sizes=None, image_attention_mask=None, audio_embed_sizes=None, audio_attention_mask=None, ) -> torch.FloatTensor: with torch.no_grad(): image_position_mask = (input_ids == self.config.vision_config.image_token_id).unsqueeze(-1) non_image_position_mask = ~image_position_mask image_embeds = None audio_embeds = None if image_pixel_values is not None and (input_ids == self.image_token_id).any(): image_embeds = self.image_embed( input_ids, inputs_embeds, image_pixel_values=image_pixel_values, image_sizes=image_sizes, image_attention_mask=image_attention_mask, ) if audio_input_features is not None and (input_ids == self.audio_token_id).any(): audio_projection_mode = "vision" if image_pixel_values is not None else "speech" audio_embeds = self.audio_embed( input_ids, inputs_embeds, audio_input_features=audio_input_features, audio_embed_sizes=audio_embed_sizes, audio_attention_mask=audio_attention_mask, audio_projection_mode=audio_projection_mode, ) # merge image and audio if image_embeds is not None and audio_embeds is not None: inputs_embeds = image_embeds * image_position_mask + audio_embeds * non_image_position_mask elif image_embeds is not None: inputs_embeds = image_embeds elif audio_embeds is not None: inputs_embeds = audio_embeds return inputs_embeds class Phi4MultimodalRotaryEmbedding(Phi3RotaryEmbedding): pass class Phi4MultimodalPreTrainedModel(Phi3PreTrainedModel): def _init_weights(self, module): Phi3PreTrainedModel._init_weights(self, module) if isinstance(module, Phi4MultimodalImageEmbedding): module.global_img_feature_extensor.data.zero_() module.sub_img_feature_extensor.data.zero_() class Phi4MultimodalModel(Phi3Model, nn.Module): def __init__(self, config: Phi4MultimodalConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.embed_dropout = nn.Dropout(config.embd_pdrop) self.embed_tokens_extend = Phi4MultimodalFeatureEmbedding(config) self.layers = nn.ModuleList( [Phi4MultimodalDecoderLayer(config, layer_idx) for 
layer_idx in range(config.num_hidden_layers)] ) self.norm = Phi4MultimodalRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_pixel_values: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.LongTensor] = None, image_attention_mask=None, audio_input_features: Optional[torch.FloatTensor] = None, audio_embed_sizes=None, audio_attention_mask=None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> BaseModelOutputWithPast: r""" image_pixel_values (`torch.FloatTensor`, *optional*): If the input contains images, these correspond to the pixel values after transformations (as returned by the Processor) image_sizes (`torch.LongTensor`, *optional*): If the input contains images, these correspond to size of each image. image_attention_mask (`torch.LongTensor`, *optional*): Attention mask for the images. audio_input_features (`torch.FloatTensor`, *optional*): If the input contains audio samples, these correspond to the values after transformation (as returned by the Processor). audio_embed_sizes (`torch.Tensor`, *optional*): Size of the audio inputs. audio_attention_mask (`torch.Tensor, *optional*): Attention mask for the audio inputs. """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = self.embed_tokens_extend( input_ids, inputs_embeds, image_pixel_values=image_pixel_values, audio_input_features=audio_input_features, image_sizes=image_sizes, image_attention_mask=image_attention_mask, audio_embed_sizes=audio_embed_sizes, audio_attention_mask=audio_attention_mask, ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask causal_mask = mask_function( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class Phi4MultimodalForCausalLM(Phi3ForCausalLM, 
nn.Module): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = Phi4MultimodalModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_pixel_values: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.LongTensor] = None, image_attention_mask=None, audio_input_features: Optional[torch.FloatTensor] = None, audio_embed_sizes=None, audio_attention_mask=None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" image_pixel_values (`torch.FloatTensor`, *optional*): If the input contains images, these correspond to the pixel values after transformations (as returned by the Processor) image_sizes (`torch.LongTensor`, *optional*): If the input contains images, these correspond to size of each image. image_attention_mask (`torch.LongTensor`, *optional*): Attention mask for the images. audio_input_features (`torch.FloatTensor`, *optional*): If the input contains audio samples, these correspond to the values after transformation (as returned by the Processor). audio_embed_sizes (`torch.Tensor`, *optional*): Size of the audio inputs. audio_attention_mask (`torch.Tensor, *optional*): Attention mask for the audio inputs. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, Phi4MultimodalForCausalLM >>> model = Phi4MultimodalForCausalLM.from_pretrained("TBA") >>> tokenizer = AutoTokenizer.from_pretrained("TBA") >>> prompt = "This is an example script ." >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'This is an example script .\n Certainly! 
Below is a sample script that demonstrates a simple task, such as calculating the sum' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, image_pixel_values=image_pixel_values, image_sizes=image_sizes, image_attention_mask=image_attention_mask, audio_input_features=audio_input_features, audio_embed_sizes=audio_embed_sizes, audio_attention_mask=audio_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, image_pixel_values=None, image_sizes=None, image_attention_mask=None, audio_input_features=None, audio_embed_sizes=None, audio_attention_mask=None, cache_position=None, position_ids=None, use_cache=True, logits_to_keep=0, **kwargs, ): # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the # process # When the first time input length reached long and short factor switching point, enforce re-compute cache # It will cause downside of slower at this single token position, however, better than current failure. if ( past_key_values and self.config.rope_scaling and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1 ): past_length = cache_position[0] if past_length <= self.config.original_max_position_embeddings: past_key_values = None model_inputs = super().prepare_inputs_for_generation( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, image_pixel_values=image_pixel_values, image_sizes=image_sizes, image_attention_mask=image_attention_mask, audio_input_features=audio_input_features, audio_embed_sizes=audio_embed_sizes, audio_attention_mask=audio_attention_mask, cache_position=cache_position, position_ids=position_ids, use_cache=use_cache, logits_to_keep=logits_to_keep, **kwargs, ) return model_inputs __all__ = [ "Phi4MultimodalAudioPreTrainedModel", "Phi4MultimodalAudioModel", "Phi4MultimodalVisionPreTrainedModel", "Phi4MultimodalVisionModel", "Phi4MultimodalPreTrainedModel", "Phi4MultimodalModel", "Phi4MultimodalForCausalLM", "Phi4MultimodalVisionConfig", "Phi4MultimodalAudioConfig", "Phi4MultimodalConfig", ]
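# A minimal, hedged usage sketch for the model above, kept as a comment since running it requires
# real weights. The checkpoint id and the "<|image_1|>" placeholder are assumptions (the docstring
# example above still says "TBA"); substitute whatever your checkpoint's processor actually expects.
#
#   from transformers import AutoProcessor, Phi4MultimodalForCausalLM
#   from PIL import Image
#
#   model_id = "microsoft/Phi-4-multimodal-instruct"  # assumed checkpoint path
#   processor = AutoProcessor.from_pretrained(model_id)
#   model = Phi4MultimodalForCausalLM.from_pretrained(model_id)
#
#   image = Image.open("example.jpg")
#   inputs = processor(text="<|image_1|>Describe this image.", images=image, return_tensors="pt")
#   generated = model.generate(**inputs, max_new_tokens=50)
#   print(processor.batch_decode(generated, skip_special_tokens=True)[0])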
transformers/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py/0
{ "file_path": "transformers/src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py", "repo_id": "transformers", "token_count": 33545 }
504
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Pixtral."""

import math
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    pad,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_kwargs,
    validate_preprocess_arguments,
)
from ...utils import TensorType, is_vision_available, logging
from ...utils.import_utils import requires_backends


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


# Adapted from function in image_transforms.py to ensure any transparent pixels are converted to white.
def convert_to_rgb(image: ImageInput) -> ImageInput:
    """
    Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the
    image as is.

    Args:
        image (Image):
            The image to convert.
    """
    requires_backends(convert_to_rgb, ["vision"])

    if not isinstance(image, PIL.Image.Image):
        return image

    if image.mode == "RGB":
        return image

    # First we convert to RGBA to set background to white.
    image = image.convert("RGBA")

    # Create a new image with a white background.
    new_image = PIL.Image.new("RGBA", image.size, "WHITE")
    new_image.paste(image, (0, 0), image)
    new_image = new_image.convert("RGB")
    return new_image


def _num_image_tokens(image_size: tuple[int, int], patch_size: tuple[int, int]) -> tuple[int, int]:
    """
    Calculate the number of image patch tokens along the height and width dimensions, given the image size and patch
    size.

    Args:
        image_size (`tuple[int, int]`):
            The size of the image as `(height, width)`.
        patch_size (`tuple[int, int]`):
            The patch size as `(height, width)`.

    Returns:
        `tuple[int, int]`: The number of tokens along each dimension, as `(num_height_tokens, num_width_tokens)`.
    """
    height, width = image_size
    patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)
    num_width_tokens = (width - 1) // patch_width + 1
    num_height_tokens = (height - 1) // patch_height + 1
    return num_height_tokens, num_width_tokens


def get_resize_output_image_size(
    input_image: ImageInput,
    size: Union[int, tuple[int, int], list[int], tuple[int]],
    patch_size: Union[int, tuple[int, int], list[int], tuple[int]],
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple:
    """
    Find the target (height, width) dimension of the output image after resizing given the input image and the
    desired size.

    Args:
        input_image (`ImageInput`):
            The image to resize.
        size (`int` or `tuple[int, int]`):
            The maximum size an input image can be, as `(max_height, max_width)`. An `int` is interpreted as a
            square bound.
        patch_size (`int` or `tuple[int, int]`):
            The patch_size as `(height, width)` to use for resizing the image.
            If patch_size is an integer, `(patch_size, patch_size)` will be used.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `tuple`: The target (height, width) dimension of the output image after resizing, rounded up to multiples of
        the patch size. For example, a 300x500 image with a 1024 bound and 16x16 patches maps to (304, 512).
    """
    max_height, max_width = size if isinstance(size, (tuple, list)) else (size, size)
    patch_height, patch_width = patch_size if isinstance(patch_size, (tuple, list)) else (patch_size, patch_size)
    height, width = get_image_size(input_image, input_data_format)

    ratio = max(height / max_height, width / max_width)

    if ratio > 1:
        # Original implementation uses `round` which utilises bankers rounding, which can lead to surprising results
        # Here we use floor to ensure the image is always smaller than the given "longest_edge"
        height = int(math.floor(height / ratio))
        width = int(math.floor(width / ratio))

    num_height_tokens, num_width_tokens = _num_image_tokens((height, width), (patch_height, patch_width))
    return num_height_tokens * patch_height, num_width_tokens * patch_width


class PixtralImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Pixtral image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`dict[str, int]` *optional*, defaults to `{"longest_edge": 1024}`):
            Size of the maximum dimension of either the height or width dimension of the image. Used to control how
            images are resized. If either the height or width are greater than `size["longest_edge"]` then both the
            height and width are rescaled by `height / ratio`, `width / ratio` where
            `ratio = max(height / longest_edge, width / longest_edge)`
        patch_size (`dict[str, int]` *optional*, defaults to `{"height": 16, "width": 16}`):
            Size of the patches in the model, used to calculate the output image size. Can be overridden by
            `patch_size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale`
            in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
""" model_input_names = ["pixel_values", "image_sizes"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, patch_size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"longest_edge": 1024} patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} patch_size = get_size_dict(patch_size, default_to_square=True) self.do_resize = do_resize self.size = size self.patch_size = patch_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073] self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711] self.do_convert_rgb = do_convert_rgb self._valid_processor_keys = [ "images", "do_resize", "size", "patch_size", "resample", "do_rescale", "rescale_factor", "do_normalize", "image_mean", "image_std", "do_convert_rgb", "return_tensors", "data_format", "input_data_format", ] def resize( self, image: np.ndarray, size: dict[str, int], patch_size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dict containing the longest possible edge of the image. patch_size (`dict[str, int]`): Patch size used to calculate the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if "longest_edge" in size: size = (size["longest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError("size must contain either 'longest_edge' or 'height' and 'width'.") if "height" in patch_size and "width" in patch_size: patch_size = (patch_size["height"], patch_size["width"]) else: raise ValueError("patch_size must contain either 'shortest_edge' or 'height' and 'width'.") output_size = get_resize_output_image_size( image, size=size, patch_size=patch_size, input_data_format=input_data_format, ) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _pad_for_batching( self, pixel_values: list[np.ndarray], image_sizes: list[list[int]], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ): """ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches. 
Args: pixel_values (`list[np.ndarray]`): An array of pixel values of each images of shape (`batch_size`, `height`, `width`, `channels`) image_sizes (`list[list[int]]`): A list of sizes for each image in `pixel_values` in (height, width) format. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: list[`np.ndarray`]: The padded images. """ max_shape = ( max([size[0] for size in image_sizes]), max([size[1] for size in image_sizes]), ) pixel_values = [ pad( image, padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])), data_format=data_format, input_data_format=input_data_format, ) for image, size in zip(pixel_values, image_sizes) ] return pixel_values def preprocess( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, patch_size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Describes the maximum input dimensions to the model. patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`): Patch size in the model. Used to calculate the image after resizing. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. 
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ patch_size = patch_size if patch_size is not None else self.patch_size patch_size = get_size_dict(patch_size, default_to_square=True) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys) images = make_list_of_images(images) if not valid_images(images[0]): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) validate_preprocess_arguments( do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample, ) if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
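            # The format inferred from the first image is applied to the whole batch, so mixing
            # channels-first and channels-last inputs in a single call is not supported.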
input_data_format = infer_channel_dimension_format(images[0]) batch_images = [] batch_image_sizes = [] for image in images: if do_resize: image = self.resize( image=image, size=size, patch_size=patch_size, resample=resample, input_data_format=input_data_format, ) if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) batch_images.append(image) batch_image_sizes.append(get_image_size(image, data_format)) pixel_values = self._pad_for_batching( pixel_values=batch_images, image_sizes=batch_image_sizes, input_data_format=data_format, data_format=data_format, ) return BatchFeature( data={"pixel_values": pixel_values, "image_sizes": batch_image_sizes}, tensor_type=return_tensors ) __all__ = ["PixtralImageProcessor"]
transformers/src/transformers/models/pixtral/image_processing_pixtral.py/0
{ "file_path": "transformers/src/transformers/models/pixtral/image_processing_pixtral.py", "repo_id": "transformers", "token_count": 9348 }
505
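# Hedged sketch (standalone, not part of the Pixtral file above): the
# zero-padding scheme used by `_pad_for_batching`, reproduced with NumPy.
# Each image is padded on the bottom/right to the per-batch maximum height
# and width; the helper name `pad_to_batch_max` is ours, not the library's.
import numpy as np

def pad_to_batch_max(images, image_sizes):
    max_h = max(h for h, _ in image_sizes)
    max_w = max(w for _, w in image_sizes)
    # channels-last input: pad only the two spatial axes with zeros
    return [
        np.pad(image, ((0, max_h - h), (0, max_w - w), (0, 0)))
        for image, (h, w) in zip(images, image_sizes)
    ]

images = [np.ones((4, 6, 3)), np.ones((5, 3, 3))]
padded = pad_to_batch_max(images, [(4, 6), (5, 3)])
assert all(p.shape == (5, 6, 3) for p in padded)  # one uniform batch shape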
# coding=utf-8 # Copyright 2022 Sea AI Lab and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PoolFormer model.""" import collections.abc from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_poolformer import PoolFormerConfig logger = logging.get_logger(__name__) # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->PoolFormer class PoolFormerDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class PoolFormerEmbeddings(nn.Module): """ Construct Patch Embeddings. 
""" def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None): super().__init__() patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding) self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity() def forward(self, pixel_values): embeddings = self.projection(pixel_values) embeddings = self.norm(embeddings) return embeddings class PoolFormerGroupNorm(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, H, W] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) class PoolFormerPooling(nn.Module): def __init__(self, pool_size): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, hidden_states): return self.pool(hidden_states) - hidden_states class PoolFormerOutput(nn.Module): def __init__(self, config, dropout_prob, hidden_size, intermediate_size): super().__init__() self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1) self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1) self.drop = PoolFormerDropPath(dropout_prob) if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.conv1(hidden_states) hidden_states = self.act_fn(hidden_states) hidden_states = self.drop(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.drop(hidden_states) return hidden_states class PoolFormerLayer(nn.Module): """This corresponds to the 'PoolFormerBlock' class in the original implementation.""" def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path): super().__init__() self.pooling = PoolFormerPooling(pool_size) self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size) self.before_norm = PoolFormerGroupNorm(num_channels) self.after_norm = PoolFormerGroupNorm(num_channels) # Useful for training neural nets self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.use_layer_scale = config.use_layer_scale if config.use_layer_scale: self.layer_scale_1 = nn.Parameter( config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True ) self.layer_scale_2 = nn.Parameter( config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True ) def forward(self, hidden_states): if self.use_layer_scale: pooling_output = self.pooling(self.before_norm(hidden_states)) scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output # First residual connection hidden_states = hidden_states + self.drop_path(scaled_op) outputs = () layer_output = self.output(self.after_norm(hidden_states)) scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output # Second residual connection output = hidden_states + self.drop_path(scaled_op) outputs = (output,) + outputs return outputs else: pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states))) # First residual connection hidden_states = pooling_output + hidden_states outputs = () # Second residual connection inside the PoolFormerOutput block layer_output = 
self.drop_path(self.output(self.after_norm(hidden_states))) output = hidden_states + layer_output outputs = (output,) + outputs return outputs class PoolFormerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")] # patch embeddings embeddings = [] for i in range(config.num_encoder_blocks): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) ) self.patch_embeddings = nn.ModuleList(embeddings) # Transformer blocks blocks = [] cur = 0 for i in range(config.num_encoder_blocks): # each block consists of layers layers = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i]): layers.append( PoolFormerLayer( config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j], ) ) blocks.append(nn.ModuleList(layers)) self.block = nn.ModuleList(blocks) def forward(self, pixel_values, output_hidden_states=False, return_dict=True): all_hidden_states = () if output_hidden_states else None hidden_states = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings, self.block)): embedding_layer, block_layer = layers # Get patch embeddings from hidden_states hidden_states = embedding_layer(hidden_states) # Send the embeddings through the blocks for _, blk in enumerate(block_layer): layer_outputs = blk(hidden_states) hidden_states = layer_outputs[0] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) @auto_docstring class PoolFormerPreTrainedModel(PreTrainedModel): config: PoolFormerConfig base_model_prefix = "poolformer" main_input_name = "pixel_values" _no_split_modules = ["PoolFormerLayer"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.GroupNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, PoolFormerLayer): if hasattr(module, "layer_scale_1"): module.layer_scale_1.data.fill_(self.config.layer_scale_init_value) module.layer_scale_2.data.fill_(self.config.layer_scale_init_value) @auto_docstring class PoolFormerModel(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.encoder = PoolFormerEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.encoder.patch_embeddings @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is
None: raise ValueError("You have to specify pixel_values") encoder_outputs = self.encoder( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, ) class PoolFormerFinalPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): output = self.dense(hidden_states) return output @auto_docstring( custom_intro=""" PoolFormer Model transformer with an image classification head on top """ ) class PoolFormerForImageClassification(PoolFormerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.poolformer = PoolFormerModel(config) # Final norm self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1]) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.poolformer( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(self.norm(sequence_output).mean([-2, -1])) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) __all__ = ["PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel"]
transformers/src/transformers/models/poolformer/modeling_poolformer.py/0
{ "file_path": "transformers/src/transformers/models/poolformer/modeling_poolformer.py", "repo_id": "transformers", "token_count": 6688 }
506
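# Hedged sketch (standalone, not part of the PoolFormer file above): the
# stochastic-depth rule implemented by `drop_path`. Per sample, the residual
# branch is zeroed with probability drop_prob and survivors are rescaled by
# 1/keep_prob so the expected activation is unchanged; `drop_path_demo` is an
# illustrative name of ours.
import torch

def drop_path_demo(x: torch.Tensor, drop_prob: float = 0.25, training: bool = True) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # one Bernoulli draw per sample, broadcast over all remaining dims
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.empty(shape, dtype=x.dtype, device=x.device).bernoulli_(keep_prob)
    return x / keep_prob * mask

x = torch.ones(8, 3, 4, 4)
y = drop_path_demo(x)
print(y[:, 0, 0, 0])  # each entry is 0.0 (dropped) or 1/keep_prob (kept)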
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ProphetNet checkpoint.""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging logger = logging.get_logger(__name__) logging.set_verbosity_info() def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str): """ Copy/paste/tweak prophetnet's weights to our prophetnet structure. """ if "xprophetnet" in prophetnet_checkpoint_path: prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) else: prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path) prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained( prophetnet_checkpoint_path, output_loading_info=True ) special_keys = ["key_proj", "value_proj", "query_proj"] mapping = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: attributes = key.split(".") if attributes[0] == "lm_head": model = prophet old_model = prophet_old else: model = prophet.prophetnet old_model = prophet_old.model is_key_init = False for attribute in attributes: if attribute in mapping: old_attribute = mapping[attribute] if not hasattr(old_model, old_attribute) and len(old_attribute) > 0: old_attribute = attribute elif hasattr(old_model, attribute): old_attribute = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" model.weight = old_model.weight logger.info(f"{attribute} is initialized.") is_key_init = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
model.bias = old_model.bias logger.info(f"{attribute} is initialized") is_key_init = True break elif attribute in special_keys and hasattr(old_model, "in_proj_weight"): embed_dim = old_model.in_proj_weight.shape[0] // 3 param = getattr(model, attribute) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :]) model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim]) elif attribute == "key_proj": model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :]) model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim]) elif attribute == "value_proj": model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :]) model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :]) is_key_init = True break elif attribute == "position_embeddings": assert model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1], ( "Hidden size has to match" ) assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :]) is_key_init = True break if attribute.isdigit(): model = model[int(attribute)] old_model = old_model[int(old_attribute)] else: model = getattr(model, attribute) if old_attribute == "": old_model = old_model else: if not hasattr(old_model, old_attribute): raise ValueError(f"{old_model} does not have {old_attribute}") old_model = getattr(old_model, old_attribute) if not is_key_init: raise ValueError(f"{key} was not correctly initialized!") print(f"Saving model to {pytorch_dump_folder_path}") prophet.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
transformers/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3107 }
507
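# Hedged sketch (standalone): the fused-projection split performed by the
# conversion script above. The old model stores a fused (3*E, E)
# in_proj_weight (the layout torch.nn.MultiheadAttention uses); the script
# slices it into equal thirds for the new model's separate q/k/v projections.
import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
q_w = in_proj_weight[:embed_dim, :]
k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
v_w = in_proj_weight[2 * embed_dim :, :]
assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)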
from typing import Callable, Optional import torch import torch.utils.checkpoint from packaging import version from torch import nn from ...cache_utils import Cache, DynamicCache from ...integrations import use_kernel_forward_from_hub from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import ( BaseModelOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from ...utils.import_utils import get_torch_version from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaPreTrainedModel, apply_rotary_pos_emb, eager_attention_forward, ) from ..mistral.modeling_mistral import MistralModel from .configuration_qwen2 import Qwen2Config logger = logging.get_logger(__name__) class Qwen2MLP(LlamaMLP): def __init__(self, config): super().__init__(config) self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) class Qwen2Attention(LlamaAttention): def __init__(self, config: Qwen2Config, layer_idx: int): super().__init__(config, layer_idx) self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not 
self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights if version.parse(get_torch_version()) >= version.parse("2.3.0"): class Qwen2RMSNorm(nn.RMSNorm): def __init__(self, hidden_size, eps: float = 1e-6) -> None: super().__init__(normalized_shape=hidden_size, eps=eps, elementwise_affine=True) else: @use_kernel_forward_from_hub("RMSNorm") class Qwen2RMSNorm(nn.Module): def __init__(self, hidden_size, eps: float = 1e-6) -> None: """ Qwen2RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class Qwen2DecoderLayer(LlamaDecoderLayer): def __init__(self, config: Qwen2Config, layer_idx: int): super().__init__(config=config, layer_idx=layer_idx) self.attention_type = config.layer_types[layer_idx] class Qwen2PreTrainedModel(LlamaPreTrainedModel): pass class Qwen2Model(MistralModel): def __init__(self, config: Qwen2Config): super().__init__(config) self.has_sliding_layers = "sliding_attention" in self.config.layer_types @check_model_inputs @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # It may already have been prepared by e.g. 
`generate` if not isinstance(causal_mask_mapping := attention_mask, dict): # Prepare mask arguments mask_kwargs = { "config": self.config, "input_embeds": inputs_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # Create the masks causal_mask_mapping = { "full_attention": create_causal_mask(**mask_kwargs), } # The sliding window alternating layers are not always activated depending on the config if self.has_sliding_layers: causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask_mapping[decoder_layer.attention_type], position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class Qwen2ForCausalLM(LlamaForCausalLM): pass class Qwen2ForSequenceClassification(LlamaForSequenceClassification): pass class Qwen2ForTokenClassification(LlamaForTokenClassification): pass class Qwen2ForQuestionAnswering(LlamaForQuestionAnswering): pass __all__ = [ "Qwen2PreTrainedModel", "Qwen2Model", "Qwen2ForCausalLM", "Qwen2RMSNorm", "Qwen2ForSequenceClassification", "Qwen2ForTokenClassification", "Qwen2ForQuestionAnswering", ]
transformers/src/transformers/models/qwen2/modular_qwen2.py/0
{ "file_path": "transformers/src/transformers/models/qwen2/modular_qwen2.py", "repo_id": "transformers", "token_count": 4102 }
508
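# Hedged sketch (standalone): the grouped-query head layout set up by
# Qwen2Attention above. Q is projected to num_attention_heads heads while
# K/V use the smaller num_key_value_heads; at attention time each K/V head
# serves a group of query heads (shown here with repeat_interleave).
# Dimensions are illustrative, not Qwen2's real sizes.
import torch
from torch import nn

hidden_size, n_heads, n_kv_heads = 64, 8, 2
head_dim = hidden_size // n_heads
q_proj = nn.Linear(hidden_size, n_heads * head_dim, bias=True)
k_proj = nn.Linear(hidden_size, n_kv_heads * head_dim, bias=True)

x = torch.randn(1, 5, hidden_size)
q = q_proj(x).view(1, 5, n_heads, head_dim).transpose(1, 2)     # (1, 8, 5, 8)
k = k_proj(x).view(1, 5, n_kv_heads, head_dim).transpose(1, 2)  # (1, 2, 5, 8)
k = k.repeat_interleave(n_heads // n_kv_heads, dim=1)           # (1, 8, 5, 8)
assert q.shape == k.shape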
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Qwen2Audio. """ from typing import Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput class Qwen2AudioProcessorKwargs(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "padding": False, }, "audio_kwargs": {}, } class Qwen2AudioProcessor(ProcessorMixin): r""" Constructs a Qwen2Audio processor which wraps a Qwen2Audio feature extractor and a Qwen2Audio tokenizer into a single processor. [`Qwen2AudioProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`Qwen2TokenizerFast`]. See the [`~Qwen2AudioProcessor.__call__`] and [`~Qwen2AudioProcessor.decode`] for more information. Args: feature_extractor ([`WhisperFeatureExtractor`], *optional*): The feature extractor is a required input. tokenizer ([`Qwen2TokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`Optional[str]`, *optional*): The Jinja template to use for formatting the conversation. If not provided, the default chat template is used. audio_token (`str`, *optional*, defaults to `"<|AUDIO|>"`): The token to use for audio tokens. audio_bos_token (`str`, *optional*, defaults to `"<|audio_bos|>"`): The token to use for audio bos tokens. audio_eos_token (`str`, *optional*, defaults to `"<|audio_eos|>"`): The token to use for audio eos tokens. """ attributes = ["feature_extractor", "tokenizer"] feature_extractor_class = "WhisperFeatureExtractor" tokenizer_class = "AutoTokenizer" def __init__( self, feature_extractor=None, tokenizer=None, chat_template=None, audio_token="<|AUDIO|>", audio_bos_token="<|audio_bos|>", audio_eos_token="<|audio_eos|>", ): if chat_template is None: chat_template = self.default_chat_template self.audio_token = tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token self.audio_token_id = tokenizer.convert_tokens_to_ids(self.audio_token) self.audio_bos_token = tokenizer.audio_bos_token if hasattr(tokenizer, "audio_bos_token") else audio_bos_token self.audio_eos_token = tokenizer.audio_eos_token if hasattr(tokenizer, "audio_eos_token") else audio_eos_token super().__init__(feature_extractor, tokenizer, chat_template=chat_template) def __call__( self, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio: Union[np.ndarray, list[np.ndarray]] = None, **kwargs: Unpack[Qwen2AudioProcessorKwargs], ) -> BatchFeature: """ Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the audio(s), this method forwards the `audio` and `kwargs` arguments to WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] if `audio` is not `None`.
Please refer to the docstring of the above two methods for more information. Args: text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). audio (`np.ndarray`, `list[np.ndarray]`): The audio or batch of audios to be prepared. Each audio can be a NumPy array. """ if text is None: raise ValueError("You need to specify `text` input to process.") elif isinstance(text, str): text = [text] elif not isinstance(text, list) or not isinstance(text[0], str): raise ValueError("Invalid input text. Please provide a string, or a list of strings") output_kwargs = self._merge_kwargs( Qwen2AudioProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if audio is not None: # ensure we have as many audio inputs as audio tokens num_audio_tokens = sum(sample.count(self.audio_token) for sample in text) num_audios = 1 if type(audio) is np.ndarray else len(audio) if num_audio_tokens != num_audios: raise ValueError( f"Found {num_audio_tokens} {self.audio_token} token{'s' if num_audio_tokens > 1 else ''} in provided text but received {num_audios} audio{'s' if num_audios > 1 else ''}" ) # Some kwargs should not be changed so we can expand text with audio tokens below output_kwargs["audio_kwargs"]["return_attention_mask"] = True output_kwargs["audio_kwargs"]["padding"] = "max_length" audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"]) # rename attention_mask to prevent conflicts later on audio_inputs["feature_attention_mask"] = audio_inputs.pop("attention_mask") expanded_text = [] audio_lengths = audio_inputs["feature_attention_mask"].sum(-1).tolist() for sample in text: replace_str = [] while self.audio_token in sample: audio_length = audio_lengths.pop(0) input_length = (audio_length - 1) // 2 + 1 num_audio_tokens = (input_length - 2) // 2 + 1 expanded_audio_token = self.audio_token * num_audio_tokens audio_token_start_idx = sample.find(self.audio_token) audio_token_end_idx = audio_token_start_idx + len(self.audio_token) has_bos = ( sample[audio_token_start_idx - len(self.audio_bos_token) : audio_token_start_idx] == self.audio_bos_token ) has_eos = ( sample[audio_token_end_idx : audio_token_end_idx + len(self.audio_eos_token)] == self.audio_eos_token ) # Check if this audio token is surrounded by bos/eos tokens if not has_bos and not has_eos: expanded_audio_token = self.audio_bos_token + expanded_audio_token + self.audio_eos_token replace_str.append(expanded_audio_token) sample = sample.replace(self.audio_token, "<placeholder>", 1) while "<placeholder>" in sample: sample = sample.replace("<placeholder>", replace_str.pop(0), 1) expanded_text.append(sample) text = expanded_text return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(text, inputs, modalities=["audio"]) if audio is not None: inputs.update(audio_inputs) return BatchFeature(data={**inputs}, tensor_type=return_tensors) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names + ["feature_attention_mask"])) @property # NOTE: we don't have default templates
anymore, and the below is kept only because the hub config is not yet updated! def default_chat_template(self): """ This default ChatML template formats inputs in the form of a chat history. For each message in the chat history: * the template will output the role of the speaker followed by the content of the message. * content is a list of strings and audios. * If the content element is an audio, the template will output an `Audio N: <|audio_bos|><|AUDIO|><|audio_eos|>` placeholder line Example: ```python messages = [ {'role': 'system', 'content': 'You are a helpful assistant.'}, {"role": "user", "content": [ {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"}, {"type": "text", "text": "What's that sound?"}, ]}, {"role": "assistant", "content": "It is the sound of glass shattering."}, {"role": "user", "content": [ {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav"}, {"type": "text", "text": "How about this one?"}, ]}, ] result = template.render(messages=messages, add_generation_prompt=True) ``` """ # fmt: off return ( "{% set audio_count = namespace(value=0) %}" "{% for message in messages %}" "{% if loop.first and message['role'] != 'system' %}" "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" "{% endif %}" "<|im_start|>{{ message['role'] }}\n" "{% if message['content'] is string %}" "{{ message['content'] }}<|im_end|>\n" "{% else %}" "{% for content in message['content'] %}" "{% if 'audio' in content or 'audio_url' in content or message['type'] == 'audio' or content['type'] == 'audio' %}" "{% set audio_count.value = audio_count.value + 1 %}" "Audio {{ audio_count.value }}: <|audio_bos|><|AUDIO|><|audio_eos|>\n" "{% elif 'text' in content %}" "{{ content['text'] }}" "{% endif %}" "{% endfor %}" "<|im_end|>\n" "{% endif %}" "{% endfor %}" "{% if add_generation_prompt %}" "<|im_start|>assistant\n" "{% endif %}" ) # fmt: on __all__ = ["Qwen2AudioProcessor"]
transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py/0
{ "file_path": "transformers/src/transformers/models/qwen2_audio/processing_qwen2_audio.py", "repo_id": "transformers", "token_count": 4991 }
509
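# Hedged sketch (standalone): the placeholder arithmetic used in __call__
# above. From the feature extractor's mask length, the processor derives the
# number of <|AUDIO|> tokens via two ceil-style halvings; the function name
# `num_audio_placeholder_tokens` is ours.
def num_audio_placeholder_tokens(audio_length: int) -> int:
    input_length = (audio_length - 1) // 2 + 1  # conv downsampling by 2
    return (input_length - 2) // 2 + 1          # pooling by 2

# A 3000-frame mel mask -> 1500 encoder frames -> 750 placeholder tokens.
print(num_audio_placeholder_tokens(3000))  # 750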
# coding=utf-8 # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Qwen3MoE model configuration""" from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class Qwen3MoeConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Qwen3MoeModel`]. It is used to instantiate a Qwen3MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of [Qwen/Qwen3-15B-A2B](https://huggingface.co/Qwen/Qwen3-15B-A2B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 151936): Vocabulary size of the Qwen3MoE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Qwen3MoeModel`] hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 6144): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 4): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details, check out [this paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 32768): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (> `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the query, key, value and output projection layers during self-attention. use_sliding_window (`bool`, *optional*, defaults to `False`): Whether to use sliding window attention. sliding_window (`int`, *optional*, defaults to 4096): Sliding window attention (SWA) window size. If not specified, will default to `4096`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. decoder_sparse_step (`int`, *optional*, defaults to 1): The frequency of the MoE layer. moe_intermediate_size (`int`, *optional*, defaults to 768): Intermediate size of the routed expert. num_experts_per_tok (`int`, *optional*, defaults to 8): Number of selected experts. num_experts (`int`, *optional*, defaults to 128): Number of routed experts. norm_topk_prob (`bool`, *optional*, defaults to `False`): Whether to normalize the topk probabilities.
output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabling this will also allow the model to output the auxiliary loss, including load balancing loss and router z-loss. router_aux_loss_coef (`float`, *optional*, defaults to 0.001): The aux loss factor for the total loss. mlp_only_layers (`list[int]`, *optional*, defaults to `[]`): Indicates which layers use Qwen3MoeMLP rather than Qwen3MoeSparseMoeBlock. The list contains layer indices from 0 to num_layers-1 if the model has num_layers layers. If `mlp_only_layers` is empty, `decoder_sparse_step` is used to determine the sparsity. ```python >>> from transformers import Qwen3MoeModel, Qwen3MoeConfig >>> # Initializing a Qwen3MoE style configuration >>> configuration = Qwen3MoeConfig() >>> # Initializing a model from the Qwen3-15B-A2B style configuration >>> model = Qwen3MoeModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "qwen3_moe" keys_to_ignore_at_inference = ["past_key_values"] # Default tensor parallel plan for base model `Qwen3Moe` base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.experts.*.gate_proj": "colwise", "layers.*.mlp.experts.*.up_proj": "colwise", "layers.*.mlp.experts.*.down_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=151936, hidden_size=2048, intermediate_size=6144, num_hidden_layers=24, num_attention_heads=32, num_key_value_heads=4, hidden_act="silu", max_position_embeddings=32768, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, use_sliding_window=False, sliding_window=4096, attention_dropout=0.0, decoder_sparse_step=1, moe_intermediate_size=768, num_experts_per_tok=8, num_experts=128, norm_topk_prob=False, output_router_logits=False, router_aux_loss_coef=0.001, mlp_only_layers=None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.use_sliding_window = use_sliding_window self.sliding_window = sliding_window if use_sliding_window else None self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'.
if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) # MoE arguments self.decoder_sparse_step = decoder_sparse_step self.moe_intermediate_size = moe_intermediate_size self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.norm_topk_prob = norm_topk_prob self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers super().__init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Qwen3MoeConfig"]
transformers/src/transformers/models/qwen3_moe/configuration_qwen3_moe.py/0
{ "file_path": "transformers/src/transformers/models/qwen3_moe/configuration_qwen3_moe.py", "repo_id": "transformers", "token_count": 5182 }
510
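# Hedged sketch (standalone): how `decoder_sparse_step` and `mlp_only_layers`
# combine to pick sparse MoE layers. The predicate below is our reading of
# the config docstring above, not a verbatim copy of the modeling code.
def is_sparse_layer(layer_idx: int, decoder_sparse_step: int, mlp_only_layers: list, num_experts: int) -> bool:
    return (
        layer_idx not in mlp_only_layers
        and num_experts > 0
        and (layer_idx + 1) % decoder_sparse_step == 0
    )

# Step 2 with layer 0 forced dense yields an alternating dense/sparse pattern.
print([is_sparse_layer(i, 2, [0], 128) for i in range(4)])  # [False, True, False, True]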
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch REFORMER model.""" import sys from collections import namedtuple from collections.abc import Iterable from dataclasses import dataclass from functools import reduce from operator import mul from typing import Any, Optional, Union import numpy as np import torch from torch import nn from torch.autograd.function import Function from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging, ) from .configuration_reformer import ReformerConfig logger = logging.get_logger(__name__) # Define named tuples for nn.Modules here LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"]) LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"]) AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"]) ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"]) ReformerBackwardOutput = namedtuple( "ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"] ) ReformerEncoderOutput = namedtuple( "ReformerEncoderOutput", ["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"], ) class ReformerDynamicCache: """ A dynamic cache that stores past buckets instead of key/values. """ def __init__(self, _distributed_cache_data: Optional[Iterable] = None) -> None: self._seen_tokens = 0 # Used in `generate` to keep tally of how many tokens the cache has seen self.buckets_cache: list[torch.Tensor] = [] self.states_cache: list[torch.Tensor] = [] if _distributed_cache_data is not None: for buckets, states in _distributed_cache_data: self.buckets_cache.append(buckets) self.states_cache.append(states) def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]: """ Support for backwards-compatible `past_key_values` indexing, e.g. `past_key_values[0][0].shape[2]` to get the sequence length. """ if layer_idx < len(self): return (self.buckets_cache[layer_idx], self.states_cache[layer_idx]) else: raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}") def __iter__(self): """ Support for backwards-compatible `past_key_values` iteration, e.g. 
`for x in past_key_values:` to iterate over keys and values """ for layer_idx in range(len(self)): yield (self.buckets_cache[layer_idx], self.states_cache[layer_idx]) def __len__(self): """ Support for backwards-compatible `past_key_values` length, e.g. `len(past_key_values)`. This value corresponds to the number of layers in the model. """ return len(self.states_cache) def update( self, buckets: torch.Tensor, states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: """ Updates the cache with the new `buckets` and `states` for the layer `layer_idx`. Parameters: buckets (`torch.Tensor`): The new buckets to cache. states (`torch.Tensor`): The new hidden states to cache. layer_idx (`int`): The index of the layer to cache the states for. cache_kwargs (`Dict[str, Any]`, `optional`): Additional arguments for the cache subclass. No additional arguments are used in `ReformerDynamicCache`. Return: A tuple containing the updated buckets and states. """ # Update the number of seen tokens if layer_idx == 0: self._seen_tokens += states.shape[-2] # Update the cache if states is not None: if len(self.states_cache) <= layer_idx: self.states_cache.append(states) else: self.states_cache[layer_idx] = torch.cat([self.states_cache[layer_idx], states], dim=1) if buckets is not None: if len(self.buckets_cache) <= layer_idx: self.buckets_cache.append(buckets) else: self.buckets_cache[layer_idx] = torch.cat([self.buckets_cache[layer_idx], buckets], dim=-1) else: # `ReformerLocalAttn` passes `None` to buckets as the module uses no buckets self.buckets_cache.append(torch.tensor([], device=self.states_cache[layer_idx].device)) return self.buckets_cache[layer_idx], self.states_cache[layer_idx] def get_seq_length(self, layer_idx: Optional[int] = 0) -> Optional[int]: return None def to_legacy_cache(self) -> tuple[tuple[torch.Tensor, torch.Tensor]]: """Converts the `ReformerDynamicCache` instance into its equivalent in the legacy cache format. Used for backward compatibility.""" legacy_cache = () for layer_idx in range(len(self)): buckets, states = self.buckets_cache[layer_idx], self.states_cache[layer_idx] buckets = buckets if buckets.numel() != 0 else None legacy_cache += ((buckets, states),) return legacy_cache @classmethod def from_legacy_cache( cls, past_buckets_states: Optional[tuple[tuple[torch.FloatTensor, torch.FloatTensor]]] = None ) -> "ReformerDynamicCache": """Converts a cache in the legacy cache format into an equivalent `ReformerDynamicCache`. Used for backward compatibility.""" cache = cls() if past_buckets_states is not None: for layer_idx in range(len(past_buckets_states)): buckets, states = past_buckets_states[layer_idx] cache.update(buckets, states, layer_idx) return cache def _stable_argsort(vector, dim): # this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1) scale_offset = scale_offset.expand(vector.shape) scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim]) return torch.argsort(scaled_vector, dim=dim) def _get_least_common_mult_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) def _get_min_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) class AxialPositionEmbeddings(nn.Module): """ Constructs axial position embeddings. Useful for very long input sequences to save memory and time. """ def __init__(self, config): super().__init__() self.axial_pos_shape = config.axial_pos_shape self.axial_pos_embds_dim = config.axial_pos_embds_dim self.dropout = config.hidden_dropout_prob self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config) self.weights = nn.ParameterList() if sum(self.axial_pos_embds_dim) != config.hidden_size: raise ValueError( f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to " f"config.hidden_size: {config.hidden_size}" ) # create weights for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim): # create expanded shapes ax_shape = [1] * len(self.axial_pos_shape) ax_shape[axis] = self.axial_pos_shape[axis] ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,) # create tensor and init self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32))) def forward(self, position_ids): # broadcast weights to correct shape batch_size = position_ids.shape[0] sequence_length = position_ids.shape[1] broadcasted_weights = [ weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights ] if self.training is True: if reduce(mul, self.axial_pos_shape) != sequence_length: raise ValueError( f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to " f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. " f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} " "or changing config.axial_pos_shape." 
) if self.dropout > 0: weights = torch.cat(broadcasted_weights, dim=-1) # permute weights so that 2D correctly drops dims 1 and 2 transposed_weights = weights.transpose(2, 1) # drop entire matrix of last two dims (prev dims 1 and 2) dropped_transposed_weights = nn.functional.dropout2d( transposed_weights, p=self.dropout, training=self.training ) dropped_weights = dropped_transposed_weights.transpose(2, 1) position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1)) else: position_encodings = torch.cat( [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights], dim=-1, ) else: if reduce(mul, self.axial_pos_shape) < sequence_length: raise ValueError( f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to " f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, " f"{self.least_common_mult_chunk_length})." ) # compute how many columns are needed max_position_id = position_ids.max().item() required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1]) # cut to columns that are needed position_encodings = torch.cat( [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1 ) position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1])) # select correct position encodings position_encodings = torch.cat( [ torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0) for i in range(batch_size) ], dim=0, ) return position_encodings class PositionEmbeddings(nn.Module): """Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.""" def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size) def forward(self, position_ids): position_embeddings = self.embedding(position_ids) position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training) return position_embeddings class ReformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.dropout = config.hidden_dropout_prob self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = ( AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config) ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0): if input_ids is not None: input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange( start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if position_ids.shape[-1] > self.max_position_embeddings: raise ValueError( f"Sequence Length: {position_ids.shape[-1]} has to be less or equal than " f"config.max_position_embeddings {self.max_position_embeddings}." 
) # dropout embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training) # add positional embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings return embeddings class EfficientAttentionMixin: """ A few utilities for nn.Modules in Reformer, to be used as a mixin. """ def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after): """ Used to implement attention between consecutive chunks. Args: vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...] num_chunks_before: chunks before current chunk to include in attention num_chunks_after: chunks after current chunk to include in attention Returns: tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after). """ if num_chunks_before == 0 and num_chunks_after == 0: return vectors slices = [] for i in range(-num_chunks_before, num_chunks_after + 1): if i == 0: slices.append(vectors) else: slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2)) return torch.cat(slices, dim=3) def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size): """ splits hidden_size dim into attn_head_size and num_attn_heads """ new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size) x = x.view(*new_x_shape) return x.transpose(2, 1) def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size): """ merges attn_head_size dim and num_attn_heads dim into hidden_size """ x = x.permute(0, 2, 1, 3) return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size)) def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None): """ splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims """ batch_size = vectors.shape[0] split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2) if len(vectors.shape) == 4: return torch.reshape(vectors, split_dim_shape + (attn_head_size,)) elif len(vectors.shape) == 3: return torch.reshape(vectors, split_dim_shape) else: raise ValueError(f"Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}") class LSHSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config, layer_idx=None): super().__init__() self.config = config self.chunk_length = config.lsh_attn_chunk_length self.num_hashes = config.num_hashes self.num_buckets = config.num_buckets self.num_chunks_before = config.lsh_num_chunks_before self.num_chunks_after = config.lsh_num_chunks_after self.hash_seed = config.hash_seed self.is_decoder = config.is_decoder self.max_position_embeddings = config.max_position_embeddings self.layer_idx = layer_idx self.dropout = config.lsh_attention_probs_dropout_prob self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) # save mask value here. 
Need fp32 and fp16 mask values self.register_buffer("self_mask_value_float16", torch.tensor(-1e3), persistent=False) self.register_buffer("self_mask_value_float32", torch.tensor(-1e5), persistent=False) self.register_buffer("mask_value_float16", torch.tensor(-1e4), persistent=False) self.register_buffer("mask_value_float32", torch.tensor(-1e9), persistent=False) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, cache_position=None, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # num hashes can optionally be overwritten by user num_hashes = num_hashes if num_hashes is not None else self.num_hashes # check if cache shall be used and that hidden states are already cached exists_cache = past_buckets_states is not None and len(past_buckets_states) > self.layer_idx if exists_cache: assert sequence_length == 1, ( "At the moment, auto-regressive language generation is only possible one word at a time. Make sure" f" that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed." ) # get query vector query_vectors = self.query_key(hidden_states) query_vectors = self._split_hidden_size_dim( query_vectors, self.num_attention_heads, self.attention_head_size ) past_buckets = past_buckets_states.buckets_cache[self.layer_idx] past_states = past_buckets_states.states_cache[self.layer_idx] if past_buckets.numel() != 0: key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets( query_vectors=query_vectors, attention_mask=attention_mask, num_hashes=num_hashes, hidden_states=hidden_states, past_states=past_states, past_buckets=past_buckets, ) query_key_vectors = self._query_per_attn_head(key_value_hidden_states) value_vectors = self._value_per_attn_head(key_value_hidden_states) # split key & value vectors by num hashes to apply # self attention on each separately query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) # repeat query vectors across hash dimension query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1) else: key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1) query_key_vectors = self.query_key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) else: # project hidden_states to query_key and value query_vectors = None query_key_vectors = self.query_key(hidden_states) value_vectors = self.value(hidden_states) # if query key is not already split if not exists_cache or past_buckets.numel() == 0: query_key_vectors = self._split_hidden_size_dim( query_key_vectors, self.num_attention_heads, self.attention_head_size ) value_vectors = self._split_hidden_size_dim( value_vectors, self.num_attention_heads, self.attention_head_size ) # cache buckets for next incremental decoding if exists_cache and key_value_hidden_states.shape[1] >= self.chunk_length: buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) # free memory del hidden_states assert query_key_vectors.shape[-1] == self.attention_head_size, ( f"last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}." 
) assert value_vectors.shape[-1] == self.attention_head_size, ( f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." ) do_standard_self_attention = (sequence_length <= self.chunk_length) or ( exists_cache and past_states is not None ) # LSH attention only makes sense if chunked attention should be performed if not do_standard_self_attention: # set `num_buckets` on the fly, recommended way to do it if self.num_buckets is None: self._set_num_buckets(sequence_length) # use cached buckets for backprop only if buckets is None: # hash query key vectors into buckets buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) else: # make sure buckets has correct shape for LSH attention buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length) assert int(buckets.shape[-1]) == num_hashes * sequence_length, ( f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}" ) sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx( sequence_length, buckets, num_hashes ) # make sure bucket idx is not longer then sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length # cluster query key value vectors according to hashed buckets query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes) value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes) query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) elif exists_cache and past_buckets.numel() != 0: # use max sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx else: # get sequence length indices sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # scale key vectors sqrt_num = np.sqrt(self.attention_head_size) key_vectors = self._len_and_dim_norm(query_key_vectors, sqrt_num) # set query_vectors to query key vectors if LSH self attention query_vectors = query_vectors if query_vectors is not None else query_key_vectors # free memory del query_key_vectors # get attention probs out_vectors, logits, attention_probs = self._attend( query_vectors=query_vectors, key_vectors=key_vectors, value_vectors=value_vectors, sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash, attention_mask=attention_mask, head_mask=head_mask, do_standard_self_attention=do_standard_self_attention, use_cache=exists_cache, ) # free memory del key_vectors, value_vectors # re-order out_vectors and logits if not do_standard_self_attention: # sort clusters back to correct ordering out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx) if not do_standard_self_attention or (exists_cache and past_buckets.numel() != 0): # sum up all hash rounds if num_hashes > 1: out_vectors = self._split_seq_length_dim_to( out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ) logits = self._split_seq_length_dim_to( logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ).unsqueeze(-1) probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True)) out_vectors = torch.sum(out_vectors * probs_vectors, dim=2) # free memory del probs_vectors # free memory del logits assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ), ( "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length," " config.attention_head_size]`." ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () if buckets is not None: buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1) return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets) def _query_per_attn_head(self, hidden_states): per_head_query_key = self.query_key.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key) return query_key_vectors def _value_per_attn_head(self, hidden_states): per_head_value = self.value.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value) return value_vectors def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False): batch_size = vectors.shape[0] # See https://huggingface.co/papers/1509.02897 # We sample a different random rotation for each round of hashing to # decrease the probability of hash misses. 
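        # Minimal sketch of the angular LSH idea used below (illustrative only, assumed toy shapes;
        # the real code handles all heads, hash rounds and factorized bucket counts at once):
        #
        #     R = torch.randn(head_dim, num_buckets // 2)                   # one random rotation
        #     proj = x @ R                                                  # project vectors
        #     bucket = torch.argmax(torch.cat([proj, -proj], dim=-1), -1)   # pick closest "direction"
        #
        # Vectors pointing in similar directions tend to maximize the same rotated coordinate and
        # therefore land in the same bucket.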
if isinstance(self.num_buckets, int): assert self.num_buckets % 2 == 0, ( f"There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}" ) rotation_size = self.num_buckets num_buckets = self.num_buckets else: # Factorize the hash if self.num_buckets is a list or tuple rotation_size, num_buckets = 0, 1 for bucket_factor in self.num_buckets: assert bucket_factor % 2 == 0, ( f"The number of buckets should be even, but `num_bucket`: {bucket_factor}" ) rotation_size = rotation_size + bucket_factor num_buckets = num_buckets * bucket_factor # remove gradient vectors = vectors.detach() if self.hash_seed is not None: # for determinism torch.manual_seed(self.hash_seed) rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2) # create a random self.attention_head_size x num_hashes x num_buckets/2 random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype) # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2 rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations) if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1: rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1) buckets = torch.argmax(rotated_vectors, dim=-1) else: # Get the buckets for them and combine. buckets, cur_sum, cur_product = None, 0, 1 for bucket_factor in self.num_buckets: rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)] cur_sum = cur_sum + bucket_factor // 2 rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1) if buckets is None: buckets = torch.argmax(rotated_vectors_factor, dim=-1) else: buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1)) cur_product = cur_product * bucket_factor if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]): # add an extra bucket for padding tokens only num_buckets = num_buckets + 1 # assign padding tokens extra bucket buckets_mask = attention_mask.to(torch.bool)[:, None, None, :].expand(buckets.shape) buckets = torch.where( buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device) ) elif increase_num_buckets: num_buckets = num_buckets + 1 # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len). # Next we add offsets so that bucket numbers from different hashing rounds don't overlap. 
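        # Worked example (assumed toy values): with num_buckets = 4 and num_hashes = 2, hash round 0
        # keeps bucket ids 0..3 while round 1 ids are shifted to 4..7, so ids from different hash
        # rounds never collide once the hash and sequence dimensions are flattened together below.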
offsets = torch.arange(num_hashes, device=vectors.device) offsets = (offsets * num_buckets).view((1, 1, -1, 1)) # expand to batch size and num attention heads offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:]) offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3) return offset_buckets def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes): # no gradients are needed with torch.no_grad(): # hash-based sort sorted_bucket_idx = _stable_argsort(buckets, dim=-1) # create simple indices to scatter to, to have undo sort indices = ( torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device) .view(1, 1, -1) .expand(sorted_bucket_idx.shape) ) # get undo sort undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size()) undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices) return sorted_bucket_idx, undo_sorted_bucket_idx def _set_num_buckets(self, sequence_length): # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1 # make sure buckets are power of 2 num_buckets = 2**num_buckets_pow_2 # factorize `num_buckets` if `num_buckets` becomes too large num_buckets_limit = 2 * max( int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length, ) if num_buckets > num_buckets_limit: num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)] logger.warning(f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...") # set num buckets in config to be properly saved self.config.num_buckets = num_buckets self.num_buckets = num_buckets def _attend( self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, use_cache, ): # look at previous and following chunks if chunked attention if not do_standard_self_attention: key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) # get logits and dots # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft)) query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors # if chunked attention split bucket idxs to query and key if not do_standard_self_attention: query_bucket_idx = self._split_seq_length_dim_to( sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads ) key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after) elif use_cache and query_key_dots.ndim > 4: key_value_bucket_idx = sorted_bucket_idx_per_hash query_bucket_idx = ( key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max() ) elif use_cache and query_key_dots.ndim <= 4: query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1] key_value_bucket_idx = torch.arange( query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,)) else: query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash # get correct mask values depending on precision if 
query_key_dots.dtype == torch.float16: self_mask_value = self.self_mask_value_float16.half() mask_value = self.mask_value_float16.half() else: self_mask_value = self.self_mask_value_float32 mask_value = self.mask_value_float32 if not use_cache: mask = self._compute_attn_mask( query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, do_standard_self_attention, ) if mask is not None: query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # Self mask is ALWAYS applied. # From the reformer paper (https://huggingface.co/papers/2001.04451): # " While attention to the future is not allowed, typical implementations of the # Transformer do allow a position to attend to itself. # Such behavior is undesirable in a shared-QK formulation because the dot-product # of a query vector with itself will almost always be greater than the dot product of a # query vector with a vector at another position. We therefore modify the masking # to forbid a token from attending to itself, except in situations # where a token has no other valid attention targets (e.g. the first token in a sequence) " self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to( query_bucket_idx.device ) # apply self_mask query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value) # free memory del self_mask logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]` attention_probs = torch.exp(query_key_dots - logits) # free memory del query_key_dots # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if out_vectors.ndim > 4: logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1) out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) return out_vectors, logits, attention_probs def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention ): # attention mask for LSH if attention_mask is not None: # if chunked attention, the attention mask has to correspond to LSH order attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: # expand attn_mask to fit with key_value_bucket_idx shape attention_mask = attention_mask[:, None, :] attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,)) # extract attention mask from LSH sorted key_indices attention_mask = torch.gather(attention_mask, -1, key_indices) attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask def _get_relevant_hid_states_and_buckets( self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets ): # concat hidden states hidden_states = torch.cat([past_states, hidden_states], dim=1) # batch_size hidden batch_size = hidden_states.shape[0] sequence_length = hidden_states.shape[1] # 
check if cached buckets include pad bucket max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets) # if pad bucket was cached => need to increase num buckets for caching increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1 # retrieve query buckets query_buckets = self._hash_vectors( query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets ) # concat buckets concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1) # hash-based sort bucket_idx = _stable_argsort(concat_buckets, dim=-1) # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength assert bucket_idx.shape == ( batch_size, self.num_attention_heads, num_hashes, sequence_length, ), ( f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but" f" has shape {bucket_idx.shape}." ) # find indices of new bucket indices relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero() # expand relevant bucket indices to its chunks relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length) relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))] # adapt bucket_idx for batch and hidden states for index select offset = torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long) bucket_idx_batch_offset = sequence_length * ( batch_size * torch.div(offset, relevant_bucket_idx_chunk.shape[-1], rounding_mode="floor") ) # add batch offset relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset hidden_states = hidden_states.reshape((-1, self.hidden_size)) # select all relevant hidden states relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch) # reshape hidden states and bucket_idx to correct output relevant_hidden_states = relevant_hidden_states.reshape( batch_size, self.num_attention_heads, -1, self.hidden_size ) relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape( batch_size, self.num_attention_heads, num_hashes, -1 ) assert ( relevant_hidden_states.shape[2] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`," f" there are {relevant_hidden_states.shape[2]} `hidden_states`." ) assert ( relevant_bucket_idx_chunk.shape[-1] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are" f" {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`." 
        )
        return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets

    def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
        # get relevant indices of where chunk starts and its size
        start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length
        total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)

        # expand start indices and add correct chunk offset via arange
        expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
        chunk_sequence_indices = expanded_start_indices + torch.arange(
            total_chunk_size, device=indices.device, dtype=torch.long
        ).unsqueeze(0).expand(indices.shape[0], total_chunk_size)

        # make sure that circular logic holds via % seq len
        chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length

        # expand indices and set indices correctly
        indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
        indices[:, -1] = chunk_sequence_indices

        return indices

    def _len_and_dim_norm(self, vectors, sqrt_num):
        """
        length and attention head size dim normalization
        """
        vectors = self._len_norm(vectors)
        vectors = vectors / sqrt_num

        return vectors

    def _len_norm(self, x, epsilon=1e-6):
        """
        length normalization
        """
        variance = torch.mean(x**2, -1, keepdim=True)
        norm_x = x * torch.rsqrt(variance + epsilon)
        return norm_x

    def _gather_by_expansion(self, vectors, idxs, num_hashes):
        """
        expand dims of idxs and vectors for all hashes and gather
        """
        expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
        vectors = vectors.repeat(1, 1, num_hashes, 1)
        return torch.gather(vectors, 2, expanded_idxs)


class ReverseSort(Function):
    """
    After chunked attention is applied, which sorts the sequence into hash clusters, the original ordering has to be
    restored. Since a customized backward function is used for Reformer, the gradients of the output vectors have to
    be explicitly sorted back here as well.
""" @staticmethod def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx): # save sorted_bucket_idx for backprop with torch.no_grad(): ctx.sorted_bucket_idx = sorted_bucket_idx # undo sort to have correct order for next layer expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape) out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices) logits = torch.gather(logits, 2, undo_sorted_bucket_idx) return out_vectors, logits @staticmethod def backward(ctx, grad_out_vectors, grad_logits): # get parameters saved in ctx sorted_bucket_idx = ctx.sorted_bucket_idx expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape) # reverse sort of forward grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices) grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx) # return grad and `None` fillers for last 2 forward args return grad_out_vectors, grad_logits, None, None class LocalSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config, layer_idx=None): super().__init__() self.num_attention_heads = config.num_attention_heads self.chunk_length = config.local_attn_chunk_length self.num_chunks_before = config.local_num_chunks_before self.num_chunks_after = config.local_num_chunks_after self.is_decoder = config.is_decoder self.pad_token_id = config.pad_token_id self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size self.layer_idx = layer_idx # projection matrices self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.dropout = config.local_attention_probs_dropout_prob # save mask value here self.register_buffer("mask_value_float16", torch.tensor(-1e4), persistent=False) self.register_buffer("mask_value_float32", torch.tensor(-1e9), persistent=False) def forward( self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # check if cache shall be used and that hidden states are already cached if past_buckets_states is not None and len(past_buckets_states) > self.layer_idx: past_buckets = past_buckets_states.buckets_cache[self.layer_idx] past_states = past_buckets_states.states_cache[self.layer_idx] assert past_buckets.numel() == 0, ( "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching" " hidden_states_and_buckets." 
) key_value_hidden_states = self._retrieve_relevant_hidden_states( past_states, self.chunk_length, self.num_chunks_before ) key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1) # only query vector for last token query_vectors = self.query(hidden_states) # compute key and value for relevant chunk key_vectors = self.key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) # free memory del key_value_hidden_states else: # project hidden_states to query, key and value query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) # split last dim into `config.num_attention_heads` and `config.attention_head_size` query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size) key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size) value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size) assert query_vectors.shape[-1] == self.attention_head_size, ( f"last dim of query_key_vectors is {query_vectors.shape[-1]} but should be {self.attention_head_size}." ) assert key_vectors.shape[-1] == self.attention_head_size, ( f"last dim of query_key_vectors is {key_vectors.shape[-1]} but should be {self.attention_head_size}." ) assert value_vectors.shape[-1] == self.attention_head_size, ( f"last dim of query_key_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." ) if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) # normalize key vectors key_vectors = key_vectors / np.sqrt(self.attention_head_size) # get sequence length indices indices = torch.arange(sequence_length, device=query_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # if one should do normal n^2 self-attention do_standard_self_attention = sequence_length <= self.chunk_length # if input should be chunked if not do_standard_self_attention: # chunk vectors # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size query_vectors = self._split_seq_length_dim_to( query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) key_vectors = self._split_seq_length_dim_to( key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) # chunk indices query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) # append chunks before and after key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after) else: query_indices = key_indices = indices # query-key matmul: QK^T query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors mask = self._compute_attn_mask( query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention ) if mask is not None: # get mask tensor depending on half precision or not if query_key_dots.dtype == torch.float16: mask_value = self.mask_value_float16.half() else: mask_value = self.mask_value_float32 query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # softmax logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) attention_probs = torch.exp(query_key_dots - logits) # free memory del logits # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if not do_standard_self_attention: out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs) def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention ): # chunk attention mask and look before and after if attention_mask is not None: attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1) attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after) # create attn_mask attention_mask = 
attention_mask.unsqueeze(-2).expand(query_key_dots_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask @staticmethod def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before): start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length return previous_hidden_states[:, start_position:] class ReformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ReformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attn_layers = config.attn_layers self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh": self.self_attention = LSHSelfAttention(config, layer_idx=layer_id) elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local": self.self_attention = LocalSelfAttention(config, layer_idx=layer_id) elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == {"lsh", "local"}: # get correct attn layers if self.attn_layers[self.layer_id] == "lsh": self.self_attention = LSHSelfAttention(config, layer_idx=layer_id) else: self.self_attention = LocalSelfAttention(config, layer_idx=layer_id) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. " "Select attn layer types from ['lsh', 'local'] only." 
) self.output = ReformerSelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, cache_position=None, ): hidden_states = self.layer_norm(hidden_states) # use cached buckets for backprob if buckets not None for LSHSelfAttention self_attention_outputs = self.self_attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_attentions=output_attentions, buckets=buckets, cache_position=cache_position, ) # add buckets if necessary if hasattr(self_attention_outputs, "buckets"): buckets = self_attention_outputs.buckets else: buckets = None # cache hidden states for future use if use_cache and past_buckets_states is not None: # padded input should not be cached during prefill states = ( hidden_states[:, :orig_sequence_length] if len(past_buckets_states.states_cache) <= self.layer_id else hidden_states ) buckets = ( buckets[:, :, :, :orig_sequence_length] if ( len(past_buckets_states.buckets_cache) <= self.layer_id and buckets is not None and orig_sequence_length > 1 ) else buckets ) buckets, hidden_states = past_buckets_states.update( buckets, states[:, :orig_sequence_length], self.layer_id ) # compute attention feed forward output attention_output = self.output(self_attention_outputs.hidden_states) return AttentionOutput( hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets, ) class ReformerFeedForwardDense(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act self.dense = nn.Linear(config.hidden_size, config.feed_forward_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.act_fn(hidden_states) return hidden_states class ReformerFeedForwardOutput(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(config.feed_forward_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ChunkReformerFeedForward(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = ReformerFeedForwardDense(config) self.output = ReformerFeedForwardOutput(config) def forward(self, attention_output): return apply_chunking_to_forward( self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) def forward_chunk(self, hidden_states): hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense(hidden_states) return self.output(hidden_states) class ReformerLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.attention = ReformerAttention(config, layer_id) # dropout requires to have the same # seed for forward and backward pass self.attention_seed = None self.feed_forward_seed = None self.feed_forward = 
ChunkReformerFeedForward(config) def _init_attention_seed(self): """ This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.attention_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.attention_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.attention_seed) def _init_feed_forward_seed(self): """ This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.feed_forward_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.feed_forward_seed) def forward( self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, ): with torch.no_grad(): # every forward pass we sample a different seed # for dropout and save for forward fn in backward pass # to have correct dropout if self.training: self._init_attention_seed() attn_outputs = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = attn_outputs.hidden_states # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # Y_1 = X_1 + f(X_2) attn_output = prev_attn_output + attn_output # free memory del prev_attn_output # every forward pass we sample a different seed # for dropout and save seed for forward fn in backward # to have correct dropout if self.training: self._init_feed_forward_seed() # Y_2 = X_2 + g(Y_1) hidden_states = hidden_states + self.feed_forward(attn_output) return ReformerOutput( attn_output=attn_output, hidden_states=hidden_states, attention_probs=attn_outputs.attention_probs, buckets=attn_outputs.buckets, ) def backward_pass( self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None, ): # Implements the backward pass for reversible ResNets. # A good blog post on how this works can be found here: # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py assert self.training, ( "If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the" " model into training mode." 
) with torch.enable_grad(): next_attn_output.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.feed_forward_seed) # g(Y_1) res_hidden_states = self.feed_forward(next_attn_output) res_hidden_states.backward(grad_hidden_states, retain_graph=True) with torch.no_grad(): # X_2 = Y_2 - g(Y_1) hidden_states = hidden_states - res_hidden_states del res_hidden_states grad_attn_output = grad_attn_output + next_attn_output.grad next_attn_output.grad = None with torch.enable_grad(): hidden_states.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.attention_seed) # f(X_2) # use cached buckets for backprob if buckets not None for LSHSelfAttention output = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets, ).hidden_states output.backward(grad_attn_output, retain_graph=True) with torch.no_grad(): # X_1 = Y_1 - f(X_2) attn_output = next_attn_output - output del output, next_attn_output grad_hidden_states = grad_hidden_states + hidden_states.grad hidden_states.grad = None hidden_states = hidden_states.detach() return ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) class _ReversibleFunction(Function): """ To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here. This way it is made sure that no memory expensive activations are saved during the forward pass. This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py """ @staticmethod def forward( ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ): all_buckets = () # split duplicated tensor hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1) for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)): if output_hidden_states is True: all_hidden_states.append(hidden_states) layer_outputs = layer( prev_attn_output=attn_output, hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = layer_outputs.attn_output hidden_states = layer_outputs.hidden_states all_buckets = all_buckets + (layer_outputs.buckets,) if output_attentions: all_attentions.append(layer_outputs.attention_probs) # Add last layer if output_hidden_states is True: all_hidden_states.append(hidden_states) # attach params to ctx for backward ctx.save_for_backward(attn_output.detach(), hidden_states.detach()) ctx.layers = layers ctx.all_buckets = all_buckets ctx.head_mask = head_mask ctx.attention_mask = attention_mask # Concatenate 2 RevNet outputs return torch.cat([attn_output, hidden_states], dim=-1) @staticmethod def backward(ctx, grad_hidden_states): grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1) # retrieve params from ctx for backward attn_output, hidden_states = ctx.saved_tensors # create tuple output = ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) # free memory del grad_attn_output, grad_hidden_states, attn_output, hidden_states 
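        # Reversible-layer backward sketch: no intermediate activations were saved in the forward
        # pass, so each layer re-derives its inputs from its outputs while the layers are walked in
        # reverse order (using the same coupling as the forward pass, Y_1 = X_1 + f(X_2) and
        # Y_2 = X_2 + g(Y_1)):
        #
        #     X_2 = Y_2 - g(Y_1)   # recompute the feed-forward input stream
        #     X_1 = Y_1 - f(X_2)   # recompute the attention input stream
        #
        # `ReformerLayer.backward_pass` below performs these subtractions and accumulates gradients.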
layers = ctx.layers all_buckets = ctx.all_buckets head_mask = ctx.head_mask attention_mask = ctx.attention_mask for idx, layer in enumerate(layers[::-1]): # pop last buckets from stack buckets = all_buckets[-1] all_buckets = all_buckets[:-1] # backprop output = layer.backward_pass( next_attn_output=output.attn_output, hidden_states=output.hidden_states, grad_attn_output=output.grad_attn_output, grad_hidden_states=output.grad_hidden_states, head_mask=head_mask[len(layers) - idx - 1], attention_mask=attention_mask, buckets=buckets, ) assert all_buckets == (), "buckets have to be empty after backpropagation" grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1) # num of return vars has to match num of forward() args # return gradient for hidden_states arg and None for other args return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None class ReformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)]) # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False, ): # hidden_states and attention lists to be filled if wished all_hidden_states = [] all_attentions = [] # init cached hidden states if necessary if use_cache and past_buckets_states is None: past_buckets_states = ReformerDynamicCache() elif use_cache and isinstance(past_buckets_states, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `ReformerDynamicCache` instead, e.g. " "`past_key_values=ReformerDynamicCache.from_legacy_cache(past_key_values)`." 
) past_buckets_states = ReformerDynamicCache.from_legacy_cache(past_buckets_states) # concat same tensor for reversible ResNet hidden_states = torch.cat([hidden_states, hidden_states], dim=-1) hidden_states = _ReversibleFunction.apply( hidden_states, self.layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ) # Apply layer norm to concatenated hidden states hidden_states = self.layer_norm(hidden_states) # Apply dropout hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) next_cache = past_buckets_states if use_cache else None return ReformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions, past_buckets_states=next_cache, ) class ReformerOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.seq_len_dim = 1 self.chunk_size_lm_head = config.chunk_size_lm_head self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states def _tie_weights(self) -> None: # For accelerate compatibility and to not break backward compatibility if self.decoder.bias.device.type == "meta": self.decoder.bias = self.bias else: # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @auto_docstring class ReformerPreTrainedModel(PreTrainedModel): config: ReformerConfig base_model_prefix = "reformer" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" if isinstance(module, AxialPositionEmbeddings): for weight in module.weights: nn.init.normal_(weight, std=self.config.axial_norm_std) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass @auto_docstring( custom_intro=""" Output type of [`ReformerModel`]. """ ) class ReformerModelOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. 
    past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
        being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the second
        being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.

        Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
        up sequential decoding.
    """

    last_hidden_state: torch.FloatTensor
    past_buckets_states: Optional[list[tuple[torch.LongTensor, torch.FloatTensor]]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Output type of [`ReformerModelWithLMHead`].
    """
)
class ReformerModelWithLMHeadOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).

        `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
        corresponds to `sequence_length`.
    past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
        being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the second
        being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.

        Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
        up sequential decoding.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_buckets_states: Optional[list[tuple[torch.LongTensor, torch.FloatTensor]]] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@auto_docstring
class ReformerModel(ReformerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        assert self.config.num_hidden_layers > 0, (
            "`config.attn_layers` is empty. Select at least one attn layer from ['lsh', 'local']"
        )

        self.embeddings = ReformerEmbeddings(config)
        self.encoder = ReformerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layers[layer].attention.prune_heads(heads)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        num_hashes: Optional[int] = None,
        past_buckets_states: Optional[list[tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ReformerModelOutput]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to
            be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the
            indices are automatically padded to be a multiple of the chunk length.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        num_hashes (`int`, *optional*):
            The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
            the default defined in `config.num_hashes`.

            For more information, see `num_hashes` in [`ReformerConfig`].
        past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
            List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
            being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
            second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.

            Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to
            speed up sequential decoding.
        """
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()  # noqa: F841
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]  # noqa: F841
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        assert len(input_shape) == 2, (
            f"`input_ids` have to be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}"
        )

        if past_buckets_states is not None:
            assert not self.training, "`past_buckets_states` can only be used for inference, not for training."
# prepare head mask head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True) # original sequence length for padding orig_sequence_length = input_shape[-1] # if needs padding least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config) min_chunk_length = _get_min_chunk_len(self.config) must_pad_to_match_chunk_length = ( input_shape[-1] % least_common_mult_chunk_length != 0 and input_shape[-1] > min_chunk_length and past_buckets_states is None ) if must_pad_to_match_chunk_length: padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length if self.training is True: raise ValueError( f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple " f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length " f"of {input_shape[-1] + padding_length}." ) # pad input input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length( input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, input_shape=input_shape, padding_length=padding_length, padded_seq_length=least_common_mult_chunk_length, device=device, ) # start index for position encoding depends on incremental decoding if past_buckets_states is not None: start_idx_pos_encodings = past_buckets_states[0][1].shape[1] else: start_idx_pos_encodings = 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, start_idx_pos_encodings=start_idx_pos_encodings, ) encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) sequence_output = encoder_outputs.hidden_states # if padding was applied if must_pad_to_match_chunk_length: sequence_output = sequence_output[:, :orig_sequence_length] past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None attentions = encoder_outputs.all_attentions if output_attentions else None if not return_dict: return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None) return ReformerModelOutput( last_hidden_state=sequence_output, past_buckets_states=past_buckets_states, hidden_states=hidden_states, attentions=attentions, ) def _pad_to_mult_of_chunk_length( self, input_ids, inputs_embeds=None, attention_mask=None, position_ids=None, input_shape=None, padding_length=None, padded_seq_length=None, device=None, ): logger.warning_once( f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a " f"multiple of `config.chunk_length`: {padded_seq_length}" ) padded_input_ids = torch.full( (input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long, ) # Extend `attention_mask` if attention_mask is not None: pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype) attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1) else: attention_mask = torch.cat( [ torch.ones(input_shape, device=device, dtype=torch.bool), torch.zeros((input_shape[0], padding_length), device=device, 
dtype=torch.bool),
                ],
                dim=-1,
            )

        # Extend `input_ids` with padding to match least common multiple chunk_length
        if input_ids is not None:
            input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
            input_shape = input_ids.size()

            # Pad position ids if given
            if position_ids is not None:
                # positions for the padded region continue from the original (unpadded) length
                padded_position_ids = torch.arange(
                    position_ids.shape[-1], padded_seq_length, dtype=torch.long, device=device
                )
                padded_position_ids = padded_position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
                position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)

        # Extend `inputs_embeds` with padding to match least common multiple chunk_length
        if inputs_embeds is not None:
            padded_inputs_embeds = self.get_input_embeddings()(padded_input_ids)
            inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
            input_shape = inputs_embeds.size()
        return input_ids, inputs_embeds, attention_mask, position_ids, input_shape


@auto_docstring(
    custom_intro="""
    Reformer Model with a `language modeling` head on top.
    """
)
class ReformerModelWithLMHead(ReformerPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`."
        assert "local" not in self.config.attn_layers or config.local_num_chunks_after == 0, (
            "If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not"
            f" {config.local_num_chunks_after}."
        )
        assert "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0, (
            "If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 0 and not"
            f" {config.lsh_num_chunks_after}."
        )

        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings
        self.lm_head.bias = new_embeddings.bias

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        num_hashes: Optional[int] = None,
        past_buckets_states: Optional[list[tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[tuple, CausalLMOutput]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to
            be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the
            indices are automatically padded to be a multiple of the chunk length.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        num_hashes (`int`, *optional*):
            The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
            the default defined in `config.num_hashes`.

            For more information, see `num_hashes` in [`ReformerConfig`].
        past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
            List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
            being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
            second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.

            Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to
            speed up sequential decoding.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss (next-token prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only
            computed for labels in `[0, ..., config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        reformer_outputs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            past_buckets_states=past_buckets_states,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        sequence_output = reformer_outputs[0]
        logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        if not return_dict:
            output = (logits,) + reformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ReformerModelWithLMHeadOutput(
            loss=loss,
            logits=logits,
            past_buckets_states=reformer_outputs.past_buckets_states,
            hidden_states=reformer_outputs.hidden_states,
            attentions=reformer_outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, use_cache=None, num_hashes=None, **kwargs
    ):
        # Overwritten -- different expected inputs/outputs

        # only last token for input_ids if past is defined in kwargs
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]

        inputs_dict = {
            "input_ids": input_ids,
            "past_buckets_states": past_key_values,
            "use_cache": use_cache,
            "num_hashes": num_hashes,
        }

        return inputs_dict

    def _reorder_cache(self, past_key_values, beam_idx):
        reord_past_buckets_states = []
        for buckets, hidden_states in past_key_values:
            # buckets
            if buckets is not None and buckets.numel() > 0:
                reord_buckets = buckets.index_select(0, beam_idx.to(buckets.device))
            else:
                reord_buckets = None

            # hidden states
            reord_hidden_states = hidden_states.index_select(0, beam_idx.to(hidden_states.device))
            reord_past_buckets_states.append((reord_buckets, reord_hidden_states))
        if isinstance(past_key_values, ReformerDynamicCache):
            reord_past_buckets_states = ReformerDynamicCache.from_legacy_cache(reord_past_buckets_states)
        return reord_past_buckets_states


@auto_docstring
class ReformerForMaskedLM(ReformerPreTrainedModel):
    _tied_weights_keys = ["lm_head.decoder.weight", "lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        assert not config.is_decoder, (
            "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional"
            " self-attention."
) self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings self.lm_head.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedLMOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) num_hashes (`int`, *optional*): The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in `config.num_hashes`. For more information, see `num_hashes` in [`ReformerConfig`]. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels <Tip warning={true}> This example uses a false checkpoint since we don't have any available pretrained model for the masked language modeling task with the Reformer architecture. </Tip> Example: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> model = ReformerForMaskedLM.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> # add mask_token >>> tokenizer.add_special_tokens({"mask_token": "[MASK]"}) # doctest: +IGNORE_RESULT >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") >>> # resize model's embedding matrix >>> model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 1) # doctest: +IGNORE_RESULT >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> predicted_token = tokenizer.decode(predicted_token_id) ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-[MASK] tokens >>> labels = torch.where( ... inputs.input_ids == tokenizer.mask_token_id, labels[:, : inputs["input_ids"].shape[-1]], -100 ... 
) >>> outputs = model(**inputs, labels=labels) >>> loss = round(outputs.loss.item(), 2) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) @auto_docstring( custom_intro=""" Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class ReformerForSequenceClassification(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.reformer = ReformerModel(config) self.classifier = ReformerClassificationHead(config) if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) num_hashes (`int`, *optional*): The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in `config.num_hashes`. For more information, see `num_hashes` in [`ReformerConfig`]. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment") >>> model = ReformerForSequenceClassification.from_pretrained("google/reformer-crime-and-punishment") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> label = model.config.id2label[predicted_class_id] ``` ```python >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = ReformerForSequenceClassification.from_pretrained( ... "google/reformer-crime-and-punishment", num_labels=num_labels ... ) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ReformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. 
to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @auto_docstring class ReformerForQuestionAnswering(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.reformer = ReformerModel(config) # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) num_hashes (`int`, *optional*): The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites the default defined in `config.num_hashes`. For more information, see `num_hashes` in [`ReformerConfig`]. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + reformer_outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) __all__ = [ "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ]
# ==== end of file: transformers/src/transformers/models/reformer/modeling_reformer.py ====
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RoFormer."""

import json
from typing import Optional

from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library).

    [`RoFormerTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece. There are some differences between them when tokenizing Chinese.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Example:

    ```python
    >>> from transformers import RoFormerTokenizerFast

    >>> tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    >>> tokenizer.tokenize("今天天气非常好。")
    ['今', '天', '天', '气', '非常', '好', '。']
    ```"""

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure we correctly set the custom PreTokenizer
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
""" Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A RoFormer sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] if token_ids_1 is not None: output += token_ids_1 + [self.sep_token_id] return output def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files) def save_pretrained( self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs, ): self.backend_tokenizer.pre_tokenizer = BertPreTokenizer() return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs) __all__ = ["RoFormerTokenizerFast"]
# ==== end of file: transformers/src/transformers/models/roformer/tokenization_roformer_fast.py ====
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/sam2/modular_sam2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_sam2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The Meta AI Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from transformers.utils.generic import OutputRecorder, TransformersKwargs, check_model_inputs from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ( ModelOutput, auto_docstring, ) from ..auto import AutoModel from .configuration_sam2 import ( Sam2Config, Sam2HieraDetConfig, Sam2MaskDecoderConfig, Sam2PromptEncoderConfig, Sam2VisionConfig, ) @dataclass @auto_docstring(custom_intro="Base class for the vision encoder's outputs.") class Sam2VisionEncoderOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. fpn_hidden_states (`tuple(torch.FloatTensor)`): Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape `(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck. fpn_position_encoding (`tuple(torch.FloatTensor)`): Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape `(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the model at the output of each stage. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" last_hidden_state: torch.FloatTensor = None fpn_hidden_states: Optional[torch.FloatTensor] = None fpn_position_encoding: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring(custom_intro="Base class for the Sam2 model's output.") class Sam2ImageSegmentationOutput(ModelOutput): r""" iou_scores (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks)`): The Intersection over Union (IoU) scores of the predicted masks. pred_masks (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_masks, height, width)`): The predicted low-resolution masks. This is an alias for `low_res_masks`. These masks need to be post-processed by the processor to be brought to the original image size. object_score_logits (`torch.FloatTensor` of shape `(batch_size, point_batch_size, 1)`): Logits for the object score, indicating if an object is present. image_embeddings (`tuple(torch.FloatTensor)`): The features from the FPN, which are used by the mask decoder. This is a tuple of `torch.FloatTensor` where each tensor has shape `(batch_size, channels, height, width)`. vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the vision model at the output of each stage. vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the vision model. mask_decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the mask decoder. """ iou_scores: torch.FloatTensor = None pred_masks: torch.FloatTensor = None object_score_logits: torch.FloatTensor = None image_embeddings: tuple[torch.FloatTensor, ...] = None vision_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None vision_attentions: Optional[tuple[torch.FloatTensor, ...]] = None mask_decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None class Sam2PatchEmbeddings(nn.Module): r""" Turns pixel values into patch embeddings for transformer consumption. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`Sam2ImageProcessorFast.__call__`] for details. 
Returns: embeddings (`torch.FloatTensor`): Patch embeddings depend on image_size, patch_kernel_size, patch_stride and patch_padding """ def __init__(self, config: Sam2HieraDetConfig): super().__init__() num_channels = config.num_channels hidden_size = config.hidden_size self.projection = nn.Conv2d( num_channels, hidden_size, kernel_size=config.patch_kernel_size, stride=config.patch_stride, padding=config.patch_padding, ) def forward(self, pixel_values): _, num_channels, height, width = pixel_values.shape embeddings = self.projection(pixel_values).permute(0, 2, 3, 1) return embeddings # copied and adapted from original implementation, also practically equal to DetrSinePositionEmbedding class Sam2SinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__( self, num_pos_feats: int = 64, temperature: int = 10000, normalize: bool = False, scale: Optional[float] = None ): super().__init__() if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize self.scale = 2 * math.pi if scale is None else scale @compile_compatible_method_lru_cache(maxsize=1) def forward( self, shape: torch.Size, device: Union[torch.device, str], dtype: torch.dtype, mask: Optional[Tensor] = None, ) -> Tensor: if mask is None: mask = torch.zeros((shape[0], shape[2], shape[3]), device=device, dtype=torch.bool) not_mask = (~mask).to(dtype) y_embed = not_mask.cumsum(1) x_embed = not_mask.cumsum(2) if self.normalize: eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.num_pos_feats, dtype=torch.int64, device=device).to(dtype) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.num_pos_feats) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos class Sam2VisionNeck(nn.Module): def __init__(self, config: Sam2VisionConfig): super().__init__() self.config = config self.position_encoding = Sam2SinePositionEmbedding(num_pos_feats=config.fpn_hidden_size // 2, normalize=True) self.convs = nn.ModuleList() for in_channels in config.backbone_channel_list: self.convs.append( nn.Conv2d( in_channels=in_channels, out_channels=config.fpn_hidden_size, kernel_size=config.fpn_kernel_size, stride=config.fpn_stride, padding=config.fpn_padding, ), ) self.fpn_top_down_levels = config.fpn_top_down_levels def forward(self, hidden_states: torch.Tensor) -> tuple[tuple[torch.Tensor, ...], tuple[torch.Tensor, ...]]: fpn_hidden_states = () fpn_position_encoding = () # forward in top-down order (from low to high resolution) n = len(self.convs) - 1 for i in range(n, -1, -1): lateral_features = hidden_states[i].permute(0, 3, 1, 2) lateral_features = self.convs[n - i](lateral_features) if i not in self.fpn_top_down_levels or i == n: prev_features = lateral_features else: top_down_features = F.interpolate( prev_features.to(dtype=torch.float32), scale_factor=2.0, mode="nearest", align_corners=None, antialias=False, ).to(lateral_features.dtype) prev_features 
= lateral_features + top_down_features

            prev_position_encoding = self.position_encoding(
                prev_features.shape, prev_features.device, prev_features.dtype
            ).to(prev_features.dtype)

            fpn_hidden_states += (prev_features,)
            fpn_position_encoding += (prev_position_encoding,)

        return fpn_hidden_states, fpn_position_encoding


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights


def do_pool(x: torch.Tensor, query_stride: Optional[int] = None) -> torch.Tensor:
    if query_stride is None:
        return x
    # (B, H, W, C) -> (B, C, H, W)
    x = x.permute(0, 3, 1, 2)
    x = nn.functional.max_pool2d(x, kernel_size=query_stride, stride=query_stride, ceil_mode=False)
    # (B, C, H', W') -> (B, H', W', C)
    x = x.permute(0, 2, 3, 1)
    return x


class Sam2MultiScaleAttention(nn.Module):
    def __init__(
        self,
        config: Sam2HieraDetConfig,
        dim: int,
        dim_out: int,
        num_attention_heads: int,
        query_stride: Optional[tuple[int, int]] = None,
    ):
        super().__init__()

        self.config = config
        self.dim = dim
        self.dim_out = dim_out
        self.query_stride = query_stride

        self.num_attention_heads = num_attention_heads
        head_dim = dim_out // num_attention_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim_out * 3)
        self.proj = nn.Linear(dim_out, dim_out)
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        batch_size, height, width, _ = hidden_states.shape
        # qkv with shape (B, H * W, 3, nHead, C)
        qkv = self.qkv(hidden_states).reshape(batch_size, height * width, 3, self.num_attention_heads, -1)
        # q, k, v with shape (B, H * W, nheads, C)
        query, key, value = torch.unbind(qkv, 2)

        # Q pooling (for downsample at stage changes)
        if self.query_stride:
            query = do_pool(query.reshape(batch_size, height, width, -1), self.query_stride)
            height, width = query.shape[1:3]  # downsampled shape
            query = query.reshape(batch_size, height * width, self.num_attention_heads, -1)

        # transpose query, key, value to (B, nHead, H * W, C)
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            is_causal=self.is_causal,
            scaling=self.scale,
            **kwargs,
        )

        attn_output = attn_output.reshape(batch_size, height, width, -1)

        attn_output = self.proj(attn_output)
        return attn_output


class Sam2FeedForward(nn.Module):
    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        activation: str = "relu",
        sigmoid_output: bool = False,
    ):
        super().__init__()
        self.num_layers = num_layers
        self.activation = ACT2FN[activation]
        self.proj_in = nn.Linear(input_dim, hidden_dim)
        self.proj_out =
nn.Linear(hidden_dim, output_dim) self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)]) self.sigmoid_output = sigmoid_output def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) hidden_states = self.activation(hidden_states) for layer in self.layers: hidden_states = self.activation(layer(hidden_states)) hidden_states = self.proj_out(hidden_states) if self.sigmoid_output: hidden_states = F.sigmoid(hidden_states) return hidden_states def window_partition(hidden_state, window_size): """ Partition into non-overlapping windows with padding if needed. Args: hidden_state (`torch.Tensor`): Input tokens with [batch_size, height, width, num_channels]. window_size (`int`): Window size. Returns: `tuple(torch.FloatTensor)` comprising various elements: - windows: windows after partition with [batch_size * num_windows, window_size, window_size, num_channels]. - (padded_height, padded_width): padded height and width before partition """ batch_size, height, width, num_channels = hidden_state.shape pad_height = (window_size - height % window_size) % window_size pad_width = (window_size - width % window_size) % window_size # Noop in case pad_width == 0 and pad_height == 0. hidden_state = nn.functional.pad(hidden_state, (0, 0, 0, pad_width, 0, pad_height)) padded_height, padded_width = height + pad_height, width + pad_width hidden_state = hidden_state.view( batch_size, padded_height // window_size, window_size, padded_width // window_size, window_size, num_channels ) windows = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows, (padded_height, padded_width) def window_unpartition(windows, window_size, pad_height_width, height_width): """ Window unpartition into original sequences and removing padding. Args: windows (`torch.Tensor`): Input tokens with [batch_size * num_windows, window_size, window_size, num_channels]. window_size (`int`): Window size. pad_height_width (`tuple[int]`): Padded height and width (padded_height, padded_width). height_width (`tuple[int]`): Original height and width before padding. Returns: hidden_state: unpartitioned sequences with [batch_size, height, width, num_channels]. 
""" padded_height, padded_width = pad_height_width height, width = height_width batch_size = windows.shape[0] // (padded_height * padded_width // window_size // window_size) hidden_state = windows.view( batch_size, padded_height // window_size, padded_width // window_size, window_size, window_size, -1 ) hidden_state = hidden_state.permute(0, 1, 3, 2, 4, 5).contiguous() hidden_state = hidden_state.view(batch_size, padded_height, padded_width, -1) # We always have height <= padded_height and width <= padded_width hidden_state = hidden_state[:, :height, :width, :].contiguous() return hidden_state class Sam2MultiScaleBlock(GradientCheckpointingLayer): def __init__( self, config: Sam2HieraDetConfig, stage_idx: int, block_idx: int, total_block_idx: int, ): super().__init__() # take embed dim from previous stage if first block of stage self.dim = ( config.embed_dim_per_stage[stage_idx - 1] if stage_idx > 0 and block_idx == 0 else config.embed_dim_per_stage[stage_idx] ) self.dim_out = config.embed_dim_per_stage[stage_idx] self.layer_norm1 = nn.LayerNorm(self.dim, eps=config.layer_norm_eps) # take window size from previous stage if first block of stage self.window_size = ( config.window_size_per_stage[stage_idx - 1] if stage_idx > 0 and block_idx == 0 else config.window_size_per_stage[stage_idx] ) self.window_size = 0 if total_block_idx in config.global_attention_blocks else self.window_size # use query stride for first block of stage if stage is a query pool stage self.query_stride = ( config.query_stride if 0 < stage_idx <= config.num_query_pool_stages and block_idx == 0 else None ) self.attn = Sam2MultiScaleAttention( config, self.dim, self.dim_out, num_attention_heads=config.num_attention_heads_per_stage[stage_idx], query_stride=self.query_stride, ) self.layer_norm2 = nn.LayerNorm(self.dim_out, eps=config.layer_norm_eps) self.mlp = Sam2FeedForward( self.dim_out, int(self.dim_out * config.mlp_ratio), self.dim_out, num_layers=2, activation=config.hidden_act, ) if self.dim != self.dim_out: self.proj = nn.Linear(self.dim, self.dim_out) def forward( self, hidden_states: torch.Tensor, **kwargs: Unpack[TransformersKwargs], ) -> torch.FloatTensor: residual = hidden_states # batch_size, height, width, channel hidden_states = self.layer_norm1(hidden_states) # Skip connection if self.dim != self.dim_out: residual = do_pool(self.proj(hidden_states), self.query_stride) # Window partition window_size = self.window_size if self.window_size > 0: H, W = hidden_states.shape[1], hidden_states.shape[2] hidden_states, pad_hw = window_partition(hidden_states, window_size) # Window Attention + Q Pooling (if stage change) attn_output = self.attn( hidden_states=hidden_states, **kwargs, ) hidden_states = attn_output if self.query_stride: # Shapes have changed due to Q pooling window_size = self.window_size // self.query_stride[0] H, W = residual.shape[1:3] pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size pad_hw = (H + pad_h, W + pad_w) # Reverse window partition if self.window_size > 0: hidden_states = window_unpartition(hidden_states, window_size, pad_hw, (H, W)) hidden_states = residual + hidden_states layernorm_output = self.layer_norm2(hidden_states) hidden_states = hidden_states + self.mlp(layernorm_output) return hidden_states @dataclass @auto_docstring( custom_intro=""" Hiera model's outputs that also contains a pooling of the last hidden states. 
""" ) class Sam2HieraDetModelOutput(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`): hidden-states at the output of the last layer of the model. intermediate_hidden_states (`tuple[torch.FloatTensor]` of shape `(batch_size, height, width, hidden_size)`): Sequence of hidden-states at the output of the intermediate layers of the model. """ last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @auto_docstring class Sam2PreTrainedModel(PreTrainedModel): config_class = Sam2Config base_model_prefix = "sam2" main_input_name = "pixel_values" _supports_sdpa = True _supports_flash_attn_2 = True _supports_attention_backend = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, Sam2LayerNorm)): module.weight.data.fill_(1.0) module.bias.data.zero_() if isinstance(module, Sam2HieraDetModel): if module.pos_embed is not None: module.pos_embed.data.zero_() if module.pos_embed_window is not None: module.pos_embed_window.data.zero_() if isinstance(module, Sam2Model): if module.no_memory_embedding is not None: module.no_memory_embedding.data.zero_() class Sam2HieraDetModel(Sam2PreTrainedModel): config_class = Sam2HieraDetConfig main_input_name = "pixel_values" _can_record_outputs = { "hidden_states": Sam2MultiScaleBlock, "attentions": Sam2MultiScaleAttention, } def __init__(self, config: Sam2HieraDetConfig): super().__init__(config) self.patch_embed = Sam2PatchEmbeddings(config) # Windowed positional embedding (https://arxiv.org/abs/2311.05613) self.pos_embed = nn.Parameter( torch.zeros(1, config.hidden_size, *config.window_positional_embedding_background_size) ) self.pos_embed_window = nn.Parameter( torch.zeros(1, config.hidden_size, config.window_size_per_stage[0], config.window_size_per_stage[0]) ) self.stage_ends = (np.cumsum(config.blocks_per_stage) - 1).tolist() self.blocks = nn.ModuleList() total_block_idx = 0 for stage_idx, blocks_per_stage in enumerate(config.blocks_per_stage): for block_idx in range(blocks_per_stage): block = Sam2MultiScaleBlock( config=config, stage_idx=stage_idx, block_idx=block_idx, total_block_idx=total_block_idx ) self.blocks.append(block) total_block_idx += 1 def get_input_embeddings(self): return self.patch_embed def _get_pos_embed(self, hw: tuple[int, int]) -> torch.Tensor: h, w = hw window_embed = self.pos_embed_window pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic") pos_embed = pos_embed + window_embed.tile([x // y for x, y in zip(pos_embed.shape, window_embed.shape)]) pos_embed = pos_embed.permute(0, 2, 3, 1) return pos_embed @check_model_inputs def forward( self, pixel_values: Optional[torch.FloatTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, Sam2HieraDetModelOutput]: if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.patch_embed(pixel_values) hidden_states = hidden_states + self._get_pos_embed(hidden_states.shape[1:3]) intermediate_hidden_states = () for i, block_module in enumerate(self.blocks): hidden_states = block_module(hidden_states, **kwargs) if i in 
self.stage_ends:
                intermediate_hidden_states = intermediate_hidden_states + (hidden_states,)

        return Sam2HieraDetModelOutput(
            last_hidden_state=hidden_states,
            intermediate_hidden_states=intermediate_hidden_states,
        )


@auto_docstring(
    custom_intro="""
    The vision model from Sam without any head or projection on top.
    """
)
class Sam2VisionModel(Sam2PreTrainedModel):
    config_class = Sam2VisionConfig
    main_input_name = "pixel_values"

    _can_record_outputs = {
        "hidden_states": Sam2MultiScaleBlock,
        "attentions": Sam2MultiScaleAttention,
    }

    def __init__(self, config: Sam2VisionConfig):
        super().__init__(config)
        self.config = config
        self.backbone = AutoModel.from_config(config.backbone_config)
        self.neck = Sam2VisionNeck(config)
        self.num_feature_levels = config.num_feature_levels

        self.post_init()

    def get_input_embeddings(self):
        return self.backbone.get_input_embeddings()

    @check_model_inputs
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, Sam2VisionEncoderOutput]:
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Forward through backbone
        backbone_output = self.backbone(pixel_values, **kwargs)
        hidden_states = backbone_output.last_hidden_state
        intermediate_hidden_states = backbone_output.intermediate_hidden_states

        fpn_hidden_states, fpn_position_encoding = self.neck(intermediate_hidden_states)
        # Select last `num_feature_levels` feature levels from FPN and reverse order to get features from high to low resolution
        fpn_hidden_states = fpn_hidden_states[-self.num_feature_levels :][::-1]
        fpn_position_encoding = fpn_position_encoding[-self.num_feature_levels :][::-1]

        return Sam2VisionEncoderOutput(
            last_hidden_state=hidden_states,
            fpn_hidden_states=fpn_hidden_states,
            fpn_position_encoding=fpn_position_encoding,
        )


class Sam2PositionalEmbedding(nn.Module):
    def __init__(self, config: Sam2PromptEncoderConfig):
        super().__init__()
        self.scale = config.scale
        positional_embedding = self.scale * torch.randn((2, config.hidden_size // 2))
        self.register_buffer("positional_embedding", positional_embedding)

    def forward(self, input_coords, input_shape=None):
        """Positionally encode points that are normalized to [0,1]."""
        coordinates = input_coords.clone()

        if input_shape is not None:
            coordinates[:, :, :, 0] = coordinates[:, :, :, 0] / input_shape[1]
            coordinates[:, :, :, 1] = coordinates[:, :, :, 1] / input_shape[0]
        # `Tensor.to` is not in-place: assign the result so the cast actually takes effect
        coordinates = coordinates.to(torch.float32)

        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
        coordinates = 2 * coordinates - 1
        coordinates = coordinates.to(self.positional_embedding.dtype)
        coordinates = coordinates @ self.positional_embedding
        coordinates = 2 * np.pi * coordinates
        # outputs d_1 x ...
x d_n x channel shape return torch.cat([torch.sin(coordinates), torch.cos(coordinates)], dim=-1) class Sam2MaskEmbedding(nn.Module): def __init__(self, config: Sam2PromptEncoderConfig): super().__init__() self.mask_input_channels = config.mask_input_channels // 4 self.activation = ACT2FN[config.hidden_act] self.conv1 = nn.Conv2d(1, self.mask_input_channels, kernel_size=2, stride=2) self.conv2 = nn.Conv2d(self.mask_input_channels, config.mask_input_channels, kernel_size=2, stride=2) self.conv3 = nn.Conv2d(config.mask_input_channels, config.hidden_size, kernel_size=1) self.layer_norm1 = Sam2LayerNorm( self.mask_input_channels, eps=config.layer_norm_eps, data_format="channels_first" ) self.layer_norm2 = Sam2LayerNorm( self.mask_input_channels * 4, eps=config.layer_norm_eps, data_format="channels_first" ) def forward(self, masks): hidden_states = self.conv1(masks) hidden_states = self.layer_norm1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layer_norm2(hidden_states) hidden_states = self.activation(hidden_states) dense_embeddings = self.conv3(hidden_states) return dense_embeddings class Sam2PromptEncoder(nn.Module): def __init__(self, config: Sam2PromptEncoderConfig): super().__init__() self.shared_embedding = Sam2PositionalEmbedding(config) self.mask_embed = Sam2MaskEmbedding(config) self.no_mask_embed = nn.Embedding(1, config.hidden_size) self.image_embedding_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) self.mask_input_size = (4 * config.image_size // config.patch_size, 4 * config.image_size // config.patch_size) self.input_image_size = config.image_size self.point_embed = nn.Embedding(config.num_point_embeddings, config.hidden_size) self.hidden_size = config.hidden_size self.not_a_point_embed = nn.Embedding(1, config.hidden_size) def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: """Embeds point prompts.""" points = points + 0.5 # Shift to center of pixel if pad: points = torch.nn.functional.pad(points, (0, 0, 0, 1), mode="constant", value=0) labels = torch.nn.functional.pad(labels, (0, 1), mode="constant", value=-1) input_shape = (self.input_image_size, self.input_image_size) point_embedding = self.shared_embedding(points, input_shape) # torch.where and expanding the labels tensor is required by the ONNX export point_embedding = torch.where(labels[..., None] == -1, self.not_a_point_embed.weight, point_embedding) # This is required for the ONNX export. 
The dtype, device need to be explicitly
        # specified as otherwise torch.onnx.export interprets as double
        point_embedding = torch.where(
            labels[..., None] != -10,
            point_embedding,
            torch.zeros_like(point_embedding),
        )
        # Add point embeddings for labels >= 0
        point_embedding = point_embedding + self.point_embed(labels.clamp(min=0)) * (labels >= 0).unsqueeze(-1)
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        batch_size, nb_boxes = boxes.shape[:2]
        coords = boxes.reshape(batch_size, nb_boxes, 2, 2)
        input_shape = (self.input_image_size, self.input_image_size)
        corner_embedding = self.shared_embedding(coords, input_shape)
        corner_embedding[:, :, 0, :] += self.point_embed.weight[2]
        corner_embedding[:, :, 1, :] += self.point_embed.weight[3]
        return corner_embedding

    def forward(
        self,
        input_points: Optional[tuple[torch.Tensor, torch.Tensor]],
        input_labels: Optional[torch.Tensor],
        input_boxes: Optional[torch.Tensor],
        input_masks: Optional[torch.Tensor],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense embeddings.

        Args:
            input_points (`torch.Tensor`, *optional*):
                Point coordinates to embed.
            input_labels (`torch.Tensor`, *optional*):
                Labels of the points to embed.
            input_boxes (`torch.Tensor`, *optional*):
                Boxes to embed.
            input_masks (`torch.Tensor`, *optional*):
                Masks to embed.
        """
        sparse_embeddings = None
        batch_size = 1
        if input_points is not None:
            batch_size = input_points.shape[0]
            if input_labels is None:
                raise ValueError("If points are provided, labels must also be provided.")
            point_embeddings = self._embed_points(input_points, input_labels, pad=(input_boxes is None))
            sparse_embeddings = point_embeddings
        if input_boxes is not None:
            batch_size = input_boxes.shape[0]
            box_embeddings = self._embed_boxes(input_boxes)
            if sparse_embeddings is None:
                sparse_embeddings = box_embeddings
            else:
                sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)
        if input_masks is not None:
            dense_embeddings = self.mask_embed(input_masks)
        else:
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )
        return sparse_embeddings, dense_embeddings


class Sam2Attention(nn.Module):
    """
    SAM2's attention layer that allows for downscaling the size of the embedding after projection to queries, keys,
    and values.
""" def __init__(self, config, downsample_rate=None): super().__init__() downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate self.config = config self.hidden_size = config.hidden_size self.internal_dim = config.hidden_size // downsample_rate self.num_attention_heads = config.num_attention_heads self.head_dim = self.internal_dim // config.num_attention_heads self.scaling = self.head_dim**-0.5 self.is_causal = False self.q_proj = nn.Linear(self.hidden_size, self.internal_dim) self.k_proj = nn.Linear(self.hidden_size, self.internal_dim) self.v_proj = nn.Linear(self.hidden_size, self.internal_dim) self.o_proj = nn.Linear(self.internal_dim, self.hidden_size) def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_similarity: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: # Input projections batch_size, point_batch_size = query.shape[:2] new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim) query = self.q_proj(query).view(*new_shape).transpose(1, 2) key = self.k_proj(key).view(*new_shape).transpose(1, 2) value = self.v_proj(value).view(*new_shape).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask=attention_similarity, dropout=0.0 if not self.training else self.dropout_p, scaling=self.scaling, is_causal=self.is_causal, **kwargs, ) attn_output = attn_output.reshape( batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim ).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class Sam2TwoWayAttentionBlock(nn.Module): def __init__(self, config: Sam2MaskDecoderConfig, skip_first_layer_pe: bool = False): """ A transformer block with four layers: (1) self-attention of sparse inputs (2) cross attention of sparse inputs -> dense inputs (3) mlp block on sparse inputs (4) cross attention of dense inputs -> sparse inputs Arguments: config (`Sam2MaskDecoderConfig`): The configuration file used to instantiate the block attention_downsample_rate (*optionalk*, int, defaults to 2): The downsample ratio of the block used to reduce the inner dim of the attention. skip_first_layer_pe (*optional*, bool, defaults to `False`): Whether or not to skip the addition of the query_point_embedding on the first layer. 
""" super().__init__() self.self_attn = Sam2Attention(config, downsample_rate=1) self.layer_norm1 = nn.LayerNorm(config.hidden_size) self.cross_attn_token_to_image = Sam2Attention(config) self.layer_norm2 = nn.LayerNorm(config.hidden_size) self.mlp = Sam2FeedForward( config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers ) self.layer_norm3 = nn.LayerNorm(config.hidden_size) self.layer_norm4 = nn.LayerNorm(config.hidden_size) self.cross_attn_image_to_token = Sam2Attention(config) self.skip_first_layer_pe = skip_first_layer_pe def forward( self, queries: Tensor, keys: Tensor, query_point_embedding: Tensor, key_point_embedding: Tensor, attention_similarity: Tensor, **kwargs: Unpack[TransformersKwargs], ): # Self attention block if self.skip_first_layer_pe: queries, _ = self.self_attn(query=queries, key=queries, value=queries) else: query = queries + query_point_embedding attn_out, _ = self.self_attn(query=query, key=query, value=queries) queries = queries + attn_out queries = self.layer_norm1(queries) # Cross attention block, tokens attending to image embedding query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_token_to_image( query=query, key=key, value=keys, attention_similarity=attention_similarity ) queries = queries + attn_out queries = self.layer_norm2(queries) # MLP block mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.layer_norm3(queries) # Cross attention block, image embedding attending to tokens query = queries + query_point_embedding key = keys + key_point_embedding attn_out, _ = self.cross_attn_image_to_token(query=key, key=query, value=queries) keys = keys + attn_out keys = self.layer_norm4(keys) return queries, keys, attn_out class Sam2TwoWayTransformer(nn.Module): def __init__(self, config: Sam2MaskDecoderConfig): super().__init__() self.config = config self.num_hidden_layers = config.num_hidden_layers self.layers = nn.ModuleList() for i in range(self.num_hidden_layers): self.layers.append(Sam2TwoWayAttentionBlock(config, skip_first_layer_pe=(i == 0))) self.final_attn_token_to_image = Sam2Attention(config) self.layer_norm_final_attn = nn.LayerNorm(config.hidden_size) def forward( self, point_embeddings: Tensor, image_embeddings: Tensor, image_positional_embeddings: Tensor, attention_similarity: Tensor, target_embedding=None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, BaseModelOutput]: if image_embeddings is None: raise ValueError("You have to specify an image_embedding") image_embeddings = image_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) image_positional_embeddings = image_positional_embeddings.flatten(2).permute(0, 2, 1).unsqueeze(1) # Prepare queries queries = point_embeddings keys = image_embeddings # Apply transformer blocks and final layernorm for layer in self.layers: if target_embedding is not None: queries += target_embedding queries, keys, _ = layer( queries=queries, keys=keys, query_point_embedding=point_embeddings, key_point_embedding=image_positional_embeddings, attention_similarity=attention_similarity, **kwargs, ) # Apply the final attention layer from the points to the image query = queries + point_embeddings key = keys + image_positional_embeddings attn_out, _ = self.final_attn_token_to_image(query=query, key=key, value=keys) queries = queries + attn_out queries = self.layer_norm_final_attn(queries) return queries, keys class Sam2LayerNorm(nn.Module): r"""LayerNorm that supports two data formats: channels_last (default) or 
channels_first (the default here), referring to the ordering of the dimensions in the inputs. channels_last
    corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs
    with shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_first"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError(f"Unsupported data format: {self.data_format}")
        self.normalized_shape = (normalized_shape,)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.data_format == "channels_last":
            x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        elif self.data_format == "channels_first":
            input_dtype = x.dtype
            x = x.float()
            u = x.mean(1, keepdim=True)
            s = (x - u).pow(2).mean(1, keepdim=True)
            x = (x - u) / torch.sqrt(s + self.eps)
            x = x.to(dtype=input_dtype)
            x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


class Sam2MaskDecoder(nn.Module):
    def __init__(self, config: Sam2MaskDecoderConfig):
        super().__init__()
        self.config = config

        self.hidden_size = config.hidden_size
        self.num_multimask_outputs = config.num_multimask_outputs
        self.num_mask_tokens = config.num_multimask_outputs + 1

        self.iou_token = nn.Embedding(1, self.hidden_size)
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)

        self.transformer = Sam2TwoWayTransformer(config)

        # should we create a new class for this?
        self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
        self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
        self.upscale_layer_norm = Sam2LayerNorm(self.hidden_size // 4, data_format="channels_first")
        self.activation = nn.GELU()

        mlps_list = []
        for _ in range(self.num_mask_tokens):
            mlps_list += [Sam2FeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
        self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)

        self.iou_prediction_head = Sam2FeedForward(
            self.hidden_size,
            config.iou_head_hidden_dim,
            self.num_mask_tokens,
            config.iou_head_depth,
            sigmoid_output=True,
        )

        self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1)
        self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1)

        self.obj_score_token = nn.Embedding(1, self.hidden_size)
        self.pred_obj_score_head = Sam2FeedForward(self.hidden_size, self.hidden_size, 1, 3)

        self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability
        self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta
        self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh

    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_positional_embeddings: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        high_resolution_features: list[torch.Tensor],
        attention_similarity: Optional[torch.Tensor] = None,
        target_embedding: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Args:
            image_embeddings (`torch.Tensor`):
                The embeddings from the image encoder.
image_positional_embeddings (`torch.Tensor`): Positional encoding with the shape of image_embeddings. sparse_prompt_embeddings (`torch.Tensor`): The embeddings of the points and boxes. dense_prompt_embeddings (`torch.Tensor`): The embeddings of the mask inputs. multimask_output (`bool`): Whether to return multiple masks or a single mask. high_resolution_features (`list[torch.Tensor]`, *optional*): The high-resolution features from the vision encoder. attention_similarity (`torch.Tensor`, *optional*): The attention similarity tensor. target_embedding (`torch.Tensor`, *optional*): The target embedding. """ batch_size, num_channels, height, width = image_embeddings.shape point_batch_size = sparse_prompt_embeddings.shape[1] # Concatenate output tokens output_tokens = torch.cat( [ self.obj_score_token.weight, self.iou_token.weight, self.mask_tokens.weight, ], dim=0, ) output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1) if sparse_prompt_embeddings.shape[0] != 0: tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2) else: tokens = output_tokens point_embeddings = tokens.to(self.iou_token.weight.dtype) # Expand per-image data in batch direction to be per-mask image_embeddings = image_embeddings + dense_prompt_embeddings image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0) image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0) # Run the transformer point_embeddings, image_embeddings = self.transformer( point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, **kwargs, ) iou_token_out = point_embeddings[:, :, 1, :] mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :] # Upscale mask embeddings and predict masks using the mask tokens image_embeddings = image_embeddings.transpose(2, 3).view( batch_size * point_batch_size, num_channels, height, width ) feat_s0, feat_s1 = high_resolution_features feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0) feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0) upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1 upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding)) upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0) hyper_in_list: list[torch.Tensor] = [] for i in range(self.num_mask_tokens): current_mlp = self.output_hypernetworks_mlps[i] hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])] hyper_in = torch.stack(hyper_in_list, dim=2) _, num_channels, height, width = upscaled_embedding.shape upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width) masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width) # Generate mask quality predictions iou_pred = self.iou_prediction_head(iou_token_out) object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :]) # Select the correct mask or masks for output if multimask_output: mask_slice = slice(1, None) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] elif self.dynamic_multimask_via_stability and not self.training: mask_slice = slice(0, 1) masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) else: mask_slice = slice(0, 1) masks = masks[:, :, mask_slice, :, :] iou_pred = iou_pred[:, :, mask_slice] sam_tokens_out 
= mask_tokens_out[:, :, mask_slice] # [b, 3, c] shape return masks, iou_pred, sam_tokens_out, object_score_logits def _get_stability_scores(self, mask_logits): """ Compute stability scores of the mask logits based on the IoU between upper and lower thresholds. """ mask_logits = mask_logits.flatten(-2) stability_delta = self.dynamic_multimask_stability_delta area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) return stability_scores def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): """ When outputting a single mask, if the stability score from the current single-mask output (based on output token 0) falls below a threshold, we instead select from multi-mask outputs (based on output token 1~3) the mask with the highest predicted IoU score. This is intended to ensure a valid mask for both clicking and tracking. """ # The best mask from multimask output tokens (1~3) multimask_logits = all_mask_logits[:, :, 1:, :, :] multimask_iou_scores = all_iou_scores[:, :, 1:] best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) # [B, P] best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) best_scores_inds_expanded = best_scores_inds_expanded.expand( -1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1) ) best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded) # [B, P, 1, H, W] best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1)) # [B, P, 1] # The mask from singlemask output token 0 and its stability score singlemask_logits = all_mask_logits[:, :, 0:1, :, :] singlemask_iou_scores = all_iou_scores[:, :, 0:1] stability_scores = self._get_stability_scores(singlemask_logits) is_stable = stability_scores >= self.dynamic_multimask_stability_thresh # Dynamically fall back to best multimask output upon low stability scores. mask_logits_out = torch.where( is_stable[..., None, None].expand_as(singlemask_logits), singlemask_logits, best_multimask_logits, ) iou_scores_out = torch.where( is_stable.expand_as(singlemask_iou_scores), singlemask_iou_scores, best_multimask_iou_scores, ) return mask_logits_out, iou_scores_out @auto_docstring( custom_intro=""" Segment Anything Model 2 (SAM 2) for generating segmentation masks, given an input image and input points and labels, boxes, or masks. 
""" ) class Sam2Model(Sam2PreTrainedModel): _tied_weights_keys = ["prompt_encoder.shared_embedding.positional_embedding"] # need to be ignored, as it's a buffer and will not be correctly detected as tied weight _keys_to_ignore_on_load_missing = ["prompt_encoder.shared_embedding.positional_embedding"] _can_record_outputs = {"mask_decoder_attentions": OutputRecorder(Sam2TwoWayAttentionBlock, index=2)} _keys_to_ignore_on_load_unexpected = [ r"^memory_.*", r"^mask_downsample.*", r"^object_pointer_proj.*", r"^temporal_positional_encoding_projection_layer.*", "no_memory_positional_encoding", "no_object_pointer", "occlusion_spatial_embedding_parameter", ] def __init__(self, config: Sam2Config): super().__init__(config) self.shared_image_embedding = Sam2PositionalEmbedding(config.prompt_encoder_config) self.vision_encoder = AutoModel.from_config(config.vision_config) self.prompt_encoder = Sam2PromptEncoder(config.prompt_encoder_config) # The module using it is not a PreTrainedModel subclass so we need this config.mask_decoder_config._attn_implementation = config._attn_implementation self.mask_decoder = Sam2MaskDecoder(config.mask_decoder_config) self.num_feature_levels = config.vision_config.num_feature_levels self.backbone_feature_sizes = config.vision_config.backbone_feature_sizes # a single token to indicate no memory embedding from previous frames self.hidden_dim = config.vision_config.fpn_hidden_size self.no_memory_embedding = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) self.post_init() def _tie_weights(self): self.prompt_encoder.shared_embedding.positional_embedding.data = ( self.shared_image_embedding.positional_embedding.data ) def get_input_embeddings(self): return self.vision_encoder.get_input_embeddings() def get_image_wide_positional_embeddings(self) -> torch.Tensor: size = self.prompt_encoder.image_embedding_size target_device = self.shared_image_embedding.positional_embedding.device target_dtype = self.shared_image_embedding.positional_embedding.dtype grid = torch.ones(size, device=target_device, dtype=target_dtype) y_embed = grid.cumsum(dim=0) - 0.5 x_embed = grid.cumsum(dim=1) - 0.5 y_embed = y_embed / size[0] x_embed = x_embed / size[1] positional_embedding = self.shared_image_embedding(torch.stack([x_embed, y_embed], dim=-1)) return positional_embedding.permute(2, 0, 1).unsqueeze(0) # channel x height x width @torch.no_grad() def get_image_embeddings( self, pixel_values: torch.FloatTensor, **kwargs: Unpack[TransformersKwargs], ) -> list[torch.Tensor]: r""" Returns the image embeddings by passing the pixel values through the vision encoder. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input pixel values """ batch_size = pixel_values.shape[0] feature_maps, _, _, _ = self.get_image_features(pixel_values, **kwargs) # add no memory embedding to the last feature map feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding # reshape feature maps to the same shape as the backbone feature sizes image_embeddings = [ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes) ] return image_embeddings @torch.no_grad() def get_prompt_embeddings( self, input_points: Optional[torch.FloatTensor] = None, input_labels: Optional[torch.LongTensor] = None, input_boxes: Optional[torch.FloatTensor] = None, input_masks: Optional[torch.LongTensor] = None, ): r""" Returns the prompt embeddings by passing the input points, labels, boxes and masks through the prompt encoder. 
Args:
            input_points (`torch.FloatTensor` of shape `(batch_size, point_batch_size, num_points_per_image, 2)`):
                Optional input points for the prompt encoder. The padding of the point is automatically done by the
                processor. `point_batch_size` refers to the number of masks that we want the model to predict per
                point. The model will output `point_batch_size` times 3 masks in total.
            input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points_per_image)`):
                Optional input labels for the prompt encoder. The padding of the labels is automatically done by the
                processor, or they can be provided by the user.
            input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes_per_image, 4)`):
                Optional input boxes for the prompt encoder. The padding of the boxes is automatically done by the
                processor. Users can also pass the input boxes manually.
            input_masks (`torch.LongTensor` of shape `(batch_size, image_size, image_size)`):
                Optional input masks for the prompt encoder.
        """
        prompt_output = self.prompt_encoder(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )
        return prompt_output

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_points: Optional[torch.FloatTensor] = None,
        input_labels: Optional[torch.LongTensor] = None,
        input_boxes: Optional[torch.FloatTensor] = None,
        input_masks: Optional[torch.LongTensor] = None,
        image_embeddings: Optional[torch.FloatTensor] = None,
        multimask_output: bool = True,
        attention_similarity: Optional[torch.FloatTensor] = None,
        target_embedding: Optional[torch.FloatTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Sam2ImageSegmentationOutput:
        r"""
        input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):
            Input 2D spatial points; these are used by the prompt encoder to encode the prompt. Generally yields much
            better results. The points can be obtained by passing a list of list of list to the processor that will
            create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the
            second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict
            per input point), the third dimension is the number of points per segmentation mask (it is possible to
            pass multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)
            coordinates of the point. If a different number of points is passed either for each image, or for each
            mask, the processor will create "PAD" points that will correspond to the (0, 0) coordinate, and the
            computation of the embedding will be skipped for these points using the labels.
        input_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):
            Input labels for the points; these are used by the prompt encoder to encode the prompt. According to the
            official implementation, there are 3 types of labels

            - `1`: the point is a point that contains the object of interest
            - `0`: the point is a point that does not contain the object of interest
            - `-1`: the point corresponds to the background

            We added the label:

            - `-10`: the point is a padding point, thus should be ignored by the prompt encoder

            The padding labels should be automatically done by the processor.
        input_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
            Input boxes for the points; these are used by the prompt encoder to encode the prompt. Generally yields
            much better generated masks.
The boxes can be obtained by passing a list of list of list to the processor, which will generate a `torch`
            tensor, with each dimension corresponding respectively to the image batch size, the number of boxes per
            image and the coordinates of the top left and bottom right point of the box. In the order
            (`x1`, `y1`, `x2`, `y2`):

            - `x1`: the x coordinate of the top left point of the input box
            - `y1`: the y coordinate of the top left point of the input box
            - `x2`: the x coordinate of the bottom right point of the input box
            - `y2`: the y coordinate of the bottom right point of the input box
        input_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):
            SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to
            generate a corresponding embedding, that will be fed later on to the mask decoder. These masks need to be
            manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):
            Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory
            efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`
            method, and then feed them to the `forward` method instead of feeding the `pixel_values`.
        multimask_output (`bool`, *optional*):
            In the original implementation and paper, the model always outputs 3 masks per image (or per point / per
            bounding box if relevant). However, it is possible to just output a single mask that corresponds to the
            "best" mask, by specifying `multimask_output=False`.
        attention_similarity (`torch.FloatTensor`, *optional*):
            Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the
            model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).
        target_embedding (`torch.FloatTensor`, *optional*):
            Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case
            the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoModel, AutoProcessor

        >>> model = AutoModel.from_pretrained("danelcsb/sam2.1_hiera_tiny")
        >>> processor = AutoProcessor.from_pretrained("danelcsb/sam2.1_hiera_tiny")

        >>> img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png"
        >>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        >>> input_points = [[[400, 650]]]  # 2D location of a window on the car
        >>> inputs = processor(images=raw_image, input_points=input_points, return_tensors="pt")

        >>> # Get segmentation mask
        >>> outputs = model(**inputs)

        >>> # Postprocess masks
        >>> masks = processor.post_process_masks(
        ...     outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
        ... )
        ```
        """
        if not ((pixel_values is None) ^ (image_embeddings is None)):
            raise ValueError("Exactly one of pixel_values or image_embeddings must be provided.")

        if input_points is not None and input_boxes is not None:
            if input_points.shape[1] != input_boxes.shape[1]:
                raise ValueError(
                    "You should provide as many bounding boxes as input points per box. "
                    "Got {} and {}.".format(input_points.shape[1], input_boxes.shape[1])
                )

        image_positional_embeddings = self.get_image_wide_positional_embeddings()
        # repeat with batch size
        batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings[-1].shape[0]
        image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)

        vision_attentions = None
        vision_hidden_states = None

        if pixel_values is not None:
            feature_maps, _, vision_hidden_states, vision_attentions = self.get_image_features(
                pixel_values,
                **kwargs,
            )
            # add no memory embedding to the last feature map
            feature_maps[-1] = feature_maps[-1] + self.no_memory_embedding
            # reshape feature maps to the same shape as the backbone feature sizes
            image_embeddings = [
                feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
                for feat, feat_size in zip(feature_maps, self.backbone_feature_sizes)
            ]

        if input_points is not None and input_labels is None:
            input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)

        if input_points is None and input_boxes is None:
            # If no points are provided, pad with an empty point (with label -1)
            input_points = torch.zeros(
                batch_size, 1, 1, 2, dtype=image_embeddings[-1].dtype, device=image_embeddings[-1].device
            )
            input_labels = -torch.ones(batch_size, 1, 1, dtype=torch.int32, device=image_embeddings[-1].device)

        if input_masks is not None:
            # If input_masks is provided, downsize it into a low-res mask input if needed
            # and feed it as a dense mask prompt into the prompt encoder
            if input_masks.shape[-2:] != self.prompt_encoder.mask_input_size:
                input_masks = F.interpolate(
                    input_masks.float(),
                    size=self.prompt_encoder.mask_input_size,
                    align_corners=False,
                    mode="bilinear",
                    antialias=True,  # use antialias for downsampling
                ).to(input_masks.dtype)

        sparse_embeddings, dense_embeddings = self.prompt_encoder(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            input_masks=input_masks,
        )
        low_res_multimasks, iou_scores, _, object_score_logits = self.mask_decoder(
            image_embeddings=image_embeddings[-1],
            image_positional_embeddings=image_positional_embeddings,
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            high_resolution_features=image_embeddings[:-1],
            attention_similarity=attention_similarity,
            target_embedding=target_embedding,
            **kwargs,
        )

        return Sam2ImageSegmentationOutput(
            iou_scores=iou_scores,
            pred_masks=low_res_multimasks,
            object_score_logits=object_score_logits,
            image_embeddings=image_embeddings,
            vision_hidden_states=vision_hidden_states,
            vision_attentions=vision_attentions,
        )

    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[
        list[torch.Tensor],
        list[torch.Tensor],
        Optional[tuple[torch.FloatTensor, ...]],
        Optional[tuple[torch.FloatTensor, ...]],
    ]:
        r"""
        Extract and preprocess image features using the vision encoder.

        Args:
            pixel_values (`torch.FloatTensor`):
                Input pixel values of shape `(batch_size, num_channels, height, width)`.

        Returns:
            `tuple`: A tuple containing:
                - feature_maps (`list[torch.Tensor]`): List of feature maps from different levels.
                - feature_maps_position_embeddings (`list[torch.Tensor]`): List of positional embeddings for each
                  feature level.
                - vision_hidden_states (`tuple[torch.FloatTensor]`, *optional*): Hidden states from the vision
                  encoder.
                - vision_attentions (`tuple[torch.FloatTensor]`, *optional*): Attention weights from the vision
                  encoder.
""" vision_outputs: Sam2VisionEncoderOutput = self.vision_encoder( pixel_values, **kwargs, ) feature_maps = vision_outputs.fpn_hidden_states feature_maps_position_embeddings = vision_outputs.fpn_position_encoding # precompute projected level 0 and level 1 features in SAM decoder # to avoid running it again on every SAM click feature_maps = list(feature_maps) feature_maps[0] = self.mask_decoder.conv_s0(feature_maps[0]) feature_maps[1] = self.mask_decoder.conv_s1(feature_maps[1]) # flatten NxCxHxW to HWxNxC feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps] feature_maps_position_embeddings = [ feature_map_position_embedding.flatten(2).permute(2, 0, 1) for feature_map_position_embedding in feature_maps_position_embeddings ] return feature_maps, feature_maps_position_embeddings, vision_outputs.hidden_states, vision_outputs.attentions __all__ = ["Sam2Model", "Sam2VisionModel", "Sam2PreTrainedModel", "Sam2HieraDetModel"]
transformers/src/transformers/models/sam2/modeling_sam2.py/0
{ "file_path": "transformers/src/transformers/models/sam2/modeling_sam2.py", "repo_id": "transformers", "token_count": 31328 }
513
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional

import torch

from ...cache_utils import Cache
from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_rope_utils import rope_config_validation
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import logging
from ...utils.deprecation import deprecate_kwarg
from ..llama.modeling_llama import (
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaForQuestionAnswering,
    LlamaForSequenceClassification,
    LlamaForTokenClassification,
    LlamaPreTrainedModel,
    apply_rotary_pos_emb,
    eager_attention_forward,
)
from ..qwen2.modeling_qwen2 import Qwen2Model


logger = logging.get_logger(__name__)


class SmolLM3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`SmolLM3Model`]. It is used to instantiate a
    SmolLM3 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the SmolLM3 3B, e.g.
    [HuggingFaceTB/SmolLM3-3B](https://huggingface.co/HuggingFaceTB/SmolLM3-3B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the SmolLM3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`SmolLM3Model`]
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 128004):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the beginning of sentence token.
        eos_token_id (`int`, *optional*, defaults to 128001):
            The id of the end of sentence token.
        rope_theta (`float`, *optional*, defaults to 2000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
            type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this
            value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If
                    unspecified, it defaults to value recommended by the implementation, using the `factor` field to
                    infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp
                    function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp
                    function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        use_sliding_window (`bool`, *optional*, defaults to `False`):
            Whether to use sliding window attention.
        sliding_window (`int`, *optional*):
            Sliding window attention (SWA) window size. If not specified, defaults to `None` (no sliding window).
no_rope_layers (`List[int]`, *optional*):
            List with at least the same length as the number of layers in the model.
            A `1` at an index position indicates that the corresponding layer will use RoPE,
            while a `0` indicates that it's a NoPE layer.
        no_rope_layer_interval (`int`, *optional*, defaults to 4):
            If `no_rope_layers` is `None`, it will be created using a NoPE layer every `no_rope_layer_interval`
            layers.
        layer_types (`list`, *optional*):
            Attention pattern for each layer. If `None`, it is automatically computed based on the sliding window and
            NoPE settings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the up_proj, down_proj and gate_proj layers in the MLP.

    ```python
    >>> from transformers import SmolLM3Model, SmolLM3Config

    >>> # Initializing a SmolLM3 style configuration
    >>> configuration = SmolLM3Config()

    >>> # Initializing a model from the SmolLM3 style configuration
    >>> model = SmolLM3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "smollm3"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=128256,
        hidden_size=2048,
        intermediate_size=11008,
        num_hidden_layers=36,
        num_attention_heads=16,
        num_key_value_heads=4,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=128004,
        bos_token_id=128000,
        eos_token_id=128001,
        rope_theta=2000000.0,
        rope_scaling=None,
        use_sliding_window=False,
        sliding_window=None,
        no_rope_layers=None,
        no_rope_layer_interval=4,
        layer_types=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.mlp_bias = mlp_bias
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        if no_rope_layers is None:
            self.no_rope_layers = [
                int((layer_idx + 1) % no_rope_layer_interval != 0) for layer_idx in range(num_hidden_layers)
            ]
        else:
            self.no_rope_layers = no_rope_layers

        self.no_rope_layer_interval = no_rope_layer_interval

        # Update layer_types based on sliding window and NoPE pattern
        if layer_types is None:
            layer_types = []
            for layer_idx in range(num_hidden_layers):
                has_rope = 
self.no_rope_layers[layer_idx] if use_sliding_window and sliding_window is not None and not has_rope: layer_types.append("sliding_attention") else: layer_types.append("full_attention") self.layer_types = layer_types layer_type_validation(self.layer_types) # Validate the correctness of rotary position embeddings parameters # BC: if there is a 'type' field, move it to 'rope_type'. if self.rope_scaling is not None and "type" in self.rope_scaling: self.rope_scaling["rope_type"] = self.rope_scaling["type"] rope_config_validation(self) class SmolLM3Attention(LlamaAttention): def __init__(self, config: SmolLM3Config, layer_idx: int): super().__init__(config, layer_idx) self.use_rope = config.no_rope_layers[layer_idx] self.sliding_window = ( config.sliding_window if config.use_sliding_window and config.layer_types[layer_idx] == "sliding_attention" else None ) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) if self.use_rope: cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: cache_kwargs = {"cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class SmolLM3DecoderLayer(LlamaDecoderLayer): def __init__(self, config: SmolLM3Config, layer_idx: int): super().__init__(config, layer_idx) self.attention_type = config.layer_types[layer_idx] class SmolLM3PreTrainedModel(LlamaPreTrainedModel): pass class SmolLM3Model(Qwen2Model): pass class SmolLM3ForCausalLM(LlamaForCausalLM): pass class SmolLM3ForSequenceClassification(LlamaForSequenceClassification): pass class SmolLM3ForTokenClassification(LlamaForTokenClassification): pass class SmolLM3ForQuestionAnswering(LlamaForQuestionAnswering): pass __all__ = [ "SmolLM3Config", "SmolLM3PreTrainedModel", "SmolLM3Model", "SmolLM3ForCausalLM", "SmolLM3ForSequenceClassification", "SmolLM3ForTokenClassification", "SmolLM3ForQuestionAnswering", ]
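# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file): how the NoPE / sliding-window
# pattern above is derived. With the default `no_rope_layer_interval=4`, every
# fourth layer is a NoPE layer (entry `0`), and NoPE layers are marked
# "sliding_attention" only when sliding-window attention is enabled.
if __name__ == "__main__":
    config = SmolLM3Config(num_hidden_layers=8, use_sliding_window=True, sliding_window=4096)
    print(config.no_rope_layers)
    # [1, 1, 1, 0, 1, 1, 1, 0] -> layers 4 and 8 skip RoPE
    print(config.layer_types)
    # ['full_attention', 'full_attention', 'full_attention', 'sliding_attention',
    #  'full_attention', 'full_attention', 'full_attention', 'sliding_attention']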
transformers/src/transformers/models/smollm3/modular_smollm3.py/0
{ "file_path": "transformers/src/transformers/models/smollm3/modular_smollm3.py", "repo_id": "transformers", "token_count": 6780 }
514
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Speech2Text model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class Speech2TextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Speech2TextModel`]. It is used to instantiate a Speech2Text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Speech2Text [facebook/s2t-small-librispeech-asr](https://huggingface.co/facebook/s2t-small-librispeech-asr) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 10000): Vocabulary size of the Speech2Text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Speech2TextModel`] encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. encoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. encoder_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_ffn_dim (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. decoder_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer decoder. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether the model should return the last key/values attentions (not used by all models). is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the model is set up as an encoder-decoder architecture for sequence-to-sequence tasks. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. d_model (`int`, *optional*, defaults to 256): Dimensionality of the layers and the pooler layer. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. 
attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        decoder_start_token_id (`int`, *optional*, defaults to 2):
            The initial token ID of the decoder when decoding sequences.
        scale_embedding (`bool`, *optional*, defaults to `True`):
            Whether the embeddings are scaled by the square root of `d_model`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            The id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the end-of-sequence token.
        max_source_positions (`int`, *optional*, defaults to 6000):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        max_target_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically, set this to something
            large just in case (e.g., 512 or 1024 or 2048).
        num_conv_layers (`int`, *optional*, defaults to 2):
            Number of 1D convolutional layers in the conv module.
        conv_kernel_sizes (`tuple[int]`, *optional*, defaults to `(5, 5)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the conv module. The length
            of `conv_kernel_sizes` has to match `num_conv_layers`.
        conv_channels (`int`, *optional*, defaults to 1024):
            An integer defining the number of output channels of each convolutional layer except the final one in the
            conv module.
        input_feat_per_channel (`int`, *optional*, defaults to 80):
            An integer specifying the size of the feature vector. This is also the dimension of the log-mel
            filter-bank features.
        input_channels (`int`, *optional*, defaults to 1):
            An integer specifying the number of input channels of the input feature vector.
Example: ```python >>> from transformers import Speech2TextConfig, Speech2TextModel >>> # Initializing a Speech2Text s2t_transformer_s style configuration >>> configuration = Speech2TextConfig() >>> # Initializing a model (with random weights) from the s2t_transformer_s style configuration >>> model = Speech2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "speech_to_text" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.num_conv_layers = num_conv_layers self.conv_kernel_sizes = list(conv_kernel_sizes) self.conv_channels = conv_channels self.input_feat_per_channel = input_feat_per_channel self.input_channels = input_channels if len(self.conv_kernel_sizes) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` " f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." ) super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, ) __all__ = ["Speech2TextConfig"]
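# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file): `conv_kernel_sizes` must contain
# exactly one kernel size per convolutional layer, otherwise the constructor
# raises a ValueError (see the check in `__init__` above).
if __name__ == "__main__":
    config = Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5, 5))
    print(config.conv_kernel_sizes)  # [5, 5, 5]

    try:
        Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5,))
    except ValueError as err:
        print(err)  # reports the mismatch between the two settings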
transformers/src/transformers/models/speech_to_text/configuration_speech_to_text.py/0
{ "file_path": "transformers/src/transformers/models/speech_to_text/configuration_speech_to_text.py", "repo_id": "transformers", "token_count": 3842 }
515
# coding=utf-8 # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Starcoder2 model.""" from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from transformers.utils.generic import check_model_inputs from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging from ...utils.deprecation import deprecate_kwarg from ..mistral.modeling_mistral import ( MistralAttention, MistralDecoderLayer, MistralForCausalLM, MistralForSequenceClassification, MistralForTokenClassification, MistralModel, MistralRotaryEmbedding, apply_rotary_pos_emb, eager_attention_forward, ) from .configuration_starcoder2 import Starcoder2Config logger = logging.get_logger(__name__) class Starcoder2MLP(nn.Module): def __init__(self, config: Starcoder2Config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias) self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias) self.act = ACT2FN[config.hidden_act] self.residual_dropout = config.residual_dropout def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training) return hidden_states class Starcoder2Attention(MistralAttention): def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None): super().__init__(config=config, layer_idx=layer_idx) self.residual_dropout = config.residual_dropout self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], 
past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=getattr(self.config, "sliding_window", None), # diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) attn_output = nn.functional.dropout( attn_output, p=self.residual_dropout, training=self.training ) # diff with Llama return attn_output, attn_weights class Starcoder2DecoderLayer(MistralDecoderLayer): def __init__(self, config: Starcoder2Config, layer_idx: int): super().__init__(config, layer_idx) self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx) self.mlp = Starcoder2MLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) class Starcoder2RotaryEmbedding(MistralRotaryEmbedding): pass class Starcoder2Model(MistralModel): def __init__(self, config: Starcoder2Config): super().__init__(config) self.layers = nn.ModuleList( [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.embedding_dropout = config.embedding_dropout @check_model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> BaseModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = 
cache_position.unsqueeze(0) mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask causal_mask = mask_function( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds hidden_states = nn.functional.dropout( hidden_states, p=self.embedding_dropout, training=self.training ) # main diff with Llama # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, ) class Starcoder2ForCausalLM(MistralForCausalLM): pass class Starcoder2ForSequenceClassification(MistralForSequenceClassification): pass class Starcoder2ForTokenClassification(MistralForTokenClassification): pass __all__ = [ "Starcoder2ForCausalLM", "Starcoder2Model", "Starcoder2PreTrainedModel", # noqa: F822 "Starcoder2ForSequenceClassification", "Starcoder2ForTokenClassification", ]
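Note how the model picks its mask builder from the config: with `sliding_window` unset it uses `create_causal_mask`, otherwise `create_sliding_window_causal_mask`. A minimal usage sketch with a tiny, randomly initialized model (the sizes here are illustrative, not a released checkpoint):

```python
import torch
from transformers import Starcoder2Config, Starcoder2ForCausalLM

# Tiny config for illustration; leaving sliding_window at its default
# selects plain causal masking in Starcoder2Model.forward.
config = Starcoder2Config(
    vocab_size=512,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=2,
)
model = Starcoder2ForCausalLM(config).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    logits = model(input_ids).logits
print(logits.shape)  # torch.Size([1, 8, 512])
```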
transformers/src/transformers/models/starcoder2/modular_starcoder2.py/0
{ "file_path": "transformers/src/transformers/models/starcoder2/modular_starcoder2.py", "repo_id": "transformers", "token_count": 4031 }
516
# coding=utf-8 # Copyright 2024 MBZUAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorFlow SwiftFormer model.""" import collections.abc from typing import Optional, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFImageClassifierOutputWithNoAttention, ) from ...modeling_tf_utils import TFPreTrainedModel, keras, keras_serializable, unpack_inputs from ...utils import ( add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_swiftformer import SwiftFormerConfig logger = logging.get_logger(__name__) # General docstring _CONFIG_FOR_DOC = "SwiftFormerConfig" # Base docstring _CHECKPOINT_FOR_DOC = "MBZUAI/swiftformer-xs" _EXPECTED_OUTPUT_SHAPE = [1, 220, 7, 7] # Image classification docstring _IMAGE_CLASS_CHECKPOINT = "MBZUAI/swiftformer-xs" _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" class TFSwiftFormerPatchEmbeddingSequential(keras.layers.Layer): """ The sequential component of the patch embedding layer. Input: tensor of shape `[batch_size, in_channels, height, width]` Output: tensor of shape `[batch_size, out_channels, height/4, width/4]` """ def __init__(self, config: SwiftFormerConfig, **kwargs): super().__init__(**kwargs) self.out_chs = config.embed_dims[0] self.zero_padding = keras.layers.ZeroPadding2D(padding=(1, 1)) self.conv1 = keras.layers.Conv2D(self.out_chs // 2, kernel_size=3, strides=2, name="0") self.batch_norm1 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="1") self.conv2 = keras.layers.Conv2D(self.out_chs, kernel_size=3, strides=2, name="3") self.batch_norm2 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="4") self.config = config def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.zero_padding(x) x = self.conv1(x) x = self.batch_norm1(x, training=training) x = get_tf_activation("relu")(x) x = self.zero_padding(x) x = self.conv2(x) x = self.batch_norm2(x, training=training) x = get_tf_activation("relu")(x) return x def build(self, input_shape=None): if self.built: return if getattr(self, "conv1", None) is not None: with tf.name_scope(self.conv1.name): self.conv1.build(self.config.num_channels) if getattr(self, "batch_norm1", None) is not None: with tf.name_scope(self.batch_norm1.name): self.batch_norm1.build((None, None, None, self.out_chs // 2)) if getattr(self, "conv2", None) is not None: with tf.name_scope(self.conv2.name): self.conv2.build((None, None, None, self.out_chs // 2)) if getattr(self, "batch_norm2", None) is not None: with tf.name_scope(self.batch_norm2.name): self.batch_norm2.build((None, None, None, self.out_chs)) self.built = True class TFSwiftFormerPatchEmbedding(keras.layers.Layer): """ Patch Embedding Layer constructed of two 2D convolutional layers. 
Input: tensor of shape `[batch_size, in_channels, height, width]` Output: tensor of shape `[batch_size, out_channels, height/4, width/4]` """ def __init__(self, config: SwiftFormerConfig, **kwargs): super().__init__(**kwargs) self.patch_embedding = TFSwiftFormerPatchEmbeddingSequential(config, name="patch_embedding") def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: return self.patch_embedding(x, training=training) def build(self, input_shape=None): if self.built: return if getattr(self, "patch_embedding", None) is not None: with tf.name_scope(self.patch_embedding.name): self.patch_embedding.build(None) self.built = True class TFSwiftFormerDropPath(keras.layers.Layer): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(**kwargs) raise NotImplementedError("Drop path is not implemented in TF port") def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: raise NotImplementedError("Drop path is not implemented in TF port") class TFSwiftFormerEmbeddings(keras.layers.Layer): """ Embeddings layer consisting of a single 2D convolutional and batch normalization layer. Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height/stride, width/stride]` """ def __init__(self, config: SwiftFormerConfig, index: int, **kwargs): super().__init__(**kwargs) patch_size = config.down_patch_size stride = config.down_stride padding = config.down_pad embed_dims = config.embed_dims self.in_chans = embed_dims[index] self.embed_dim = embed_dims[index + 1] patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride) padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding) self.pad = keras.layers.ZeroPadding2D(padding=padding) self.proj = keras.layers.Conv2D(self.embed_dim, kernel_size=patch_size, strides=stride, name="proj") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.pad(x) x = self.proj(x) x = self.norm(x, training=training) return x def build(self, input_shape=None): if self.built: return if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build(self.in_chans) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.embed_dim)) self.built = True class TFSwiftFormerConvEncoder(keras.layers.Layer): """ `SwiftFormerConvEncoder` with 3*3 and 1*1 convolutions. 
Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, **kwargs): super().__init__(**kwargs) hidden_dim = int(config.mlp_ratio * dim) self.dim = dim self.pad = keras.layers.ZeroPadding2D(padding=(1, 1)) self.depth_wise_conv = keras.layers.Conv2D(dim, kernel_size=3, groups=dim, name="depth_wise_conv") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.point_wise_conv1 = keras.layers.Conv2D(hidden_dim, kernel_size=1, name="point_wise_conv1") self.act = get_tf_activation("gelu") self.point_wise_conv2 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv2") self.drop_path = keras.layers.Dropout(name="drop_path", rate=config.drop_conv_encoder_rate) self.hidden_dim = int(config.mlp_ratio * self.dim) def build(self, input_shape=None): if self.built: return self.layer_scale = self.add_weight( name="layer_scale", shape=self.dim, initializer="ones", trainable=True, ) if getattr(self, "depth_wise_conv", None) is not None: with tf.name_scope(self.depth_wise_conv.name): self.depth_wise_conv.build(self.dim) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.dim)) if getattr(self, "point_wise_conv1", None) is not None: with tf.name_scope(self.point_wise_conv1.name): self.point_wise_conv1.build(self.dim) if getattr(self, "point_wise_conv2", None) is not None: with tf.name_scope(self.point_wise_conv2.name): self.point_wise_conv2.build(self.hidden_dim) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: input = x x = self.pad(x) x = self.depth_wise_conv(x) x = self.norm(x, training=training) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x) return x class TFSwiftFormerMlp(keras.layers.Layer): """ MLP layer with 1*1 convolutions. 
Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, in_features: int, **kwargs): super().__init__(**kwargs) hidden_features = int(in_features * config.mlp_ratio) self.norm1 = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm1") self.fc1 = keras.layers.Conv2D(hidden_features, 1, name="fc1") act_layer = get_tf_activation(config.hidden_act) self.act = act_layer self.fc2 = keras.layers.Conv2D(in_features, 1, name="fc2") self.drop = keras.layers.Dropout(rate=config.drop_mlp_rate) self.hidden_features = hidden_features self.in_features = in_features def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: x = self.norm1(x, training=training) x = self.fc1(x) x = self.act(x) x = self.drop(x, training=training) x = self.fc2(x) x = self.drop(x, training=training) return x def build(self, input_shape=None): if self.built: return if getattr(self, "norm1", None) is not None: with tf.name_scope(self.norm1.name): self.norm1.build((None, None, None, self.in_features)) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build((None, None, None, self.in_features)) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build((None, None, None, self.hidden_features)) self.built = True class TFSwiftFormerEfficientAdditiveAttention(keras.layers.Layer): """ Efficient Additive Attention module for SwiftFormer. Input: tensor of shape `[batch_size, seq_len, dim]` Output: tensor of shape `[batch_size, seq_len, dim]` """ def __init__(self, config: SwiftFormerConfig, dim: int = 512, **kwargs): super().__init__(**kwargs) self.dim = dim self.to_query = keras.layers.Dense(dim, name="to_query") self.to_key = keras.layers.Dense(dim, name="to_key") self.scale_factor = dim**-0.5 self.proj = keras.layers.Dense(dim, name="proj") self.final = keras.layers.Dense(dim, name="final") def build(self, input_shape=None): if self.built: return self.w_g = self.add_weight( name="w_g", shape=(self.dim, 1), initializer=keras.initializers.RandomNormal(mean=0, stddev=1), trainable=True, ) if getattr(self, "to_query", None) is not None: with tf.name_scope(self.to_query.name): self.to_query.build(self.dim) if getattr(self, "to_key", None) is not None: with tf.name_scope(self.to_key.name): self.to_key.build(self.dim) if getattr(self, "proj", None) is not None: with tf.name_scope(self.proj.name): self.proj.build(self.dim) if getattr(self, "final", None) is not None: with tf.name_scope(self.final.name): self.final.build(self.dim) self.built = True def call(self, x: tf.Tensor) -> tf.Tensor: query = self.to_query(x) key = self.to_key(x) query = tf.math.l2_normalize(query, axis=-1) key = tf.math.l2_normalize(key, axis=-1) query_weight = query @ self.w_g scaled_query_weight = query_weight * self.scale_factor scaled_query_weight = tf.nn.softmax(scaled_query_weight, axis=-1) global_queries = tf.math.reduce_sum(scaled_query_weight * query, axis=1) global_queries = tf.tile(tf.expand_dims(global_queries, 1), (1, key.shape[1], 1)) out = self.proj(global_queries * key) + query out = self.final(out) return out class TFSwiftFormerLocalRepresentation(keras.layers.Layer): """ Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions. 
Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, **kwargs): super().__init__(**kwargs) self.dim = dim self.pad = keras.layers.ZeroPadding2D(padding=(1, 1)) self.depth_wise_conv = keras.layers.Conv2D(dim, kernel_size=3, groups=dim, name="depth_wise_conv") self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.point_wise_conv1 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv1") self.act = get_tf_activation("gelu") self.point_wise_conv2 = keras.layers.Conv2D(dim, kernel_size=1, name="point_wise_conv2") self.drop_path = keras.layers.Identity(name="drop_path") def build(self, input_shape=None): if self.built: return self.layer_scale = self.add_weight( name="layer_scale", shape=(self.dim), initializer="ones", trainable=True, ) if getattr(self, "depth_wise_conv", None) is not None: with tf.name_scope(self.depth_wise_conv.name): self.depth_wise_conv.build((None, None, None, self.dim)) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.dim)) if getattr(self, "point_wise_conv1", None) is not None: with tf.name_scope(self.point_wise_conv1.name): self.point_wise_conv1.build(self.dim) if getattr(self, "point_wise_conv2", None) is not None: with tf.name_scope(self.point_wise_conv2.name): self.point_wise_conv2.build(self.dim) if getattr(self, "drop_path", None) is not None: with tf.name_scope(self.drop_path.name): self.drop_path.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: input = x x = self.pad(x) x = self.depth_wise_conv(x) x = self.norm(x, training=training) x = self.point_wise_conv1(x) x = self.act(x) x = self.point_wise_conv2(x) x = input + self.drop_path(self.layer_scale * x, training=training) return x class TFSwiftFormerEncoderBlock(keras.layers.Layer): """ SwiftFormer Encoder Block for SwiftFormer. It consists of (1) Local representation module, (2) SwiftFormerEfficientAdditiveAttention, and (3) MLP block. 
Input: tensor of shape `[batch_size, channels, height, width]` Output: tensor of shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, dim: int, drop_path: float = 0.0, **kwargs): super().__init__(**kwargs) layer_scale_init_value = config.layer_scale_init_value use_layer_scale = config.use_layer_scale self.local_representation = TFSwiftFormerLocalRepresentation(config, dim=dim, name="local_representation") self.attn = TFSwiftFormerEfficientAdditiveAttention(config, dim=dim, name="attn") self.linear = TFSwiftFormerMlp(config, in_features=dim, name="linear") self.drop_path = TFSwiftFormerDropPath(config) if drop_path > 0.0 else keras.layers.Identity() self.use_layer_scale = use_layer_scale if use_layer_scale: self.dim = dim self.layer_scale_init_value = layer_scale_init_value def build(self, input_shape=None): if self.built: return if self.use_layer_scale: self.layer_scale_1 = self.add_weight( name="layer_scale_1", shape=self.dim, initializer=keras.initializers.constant(self.layer_scale_init_value), trainable=True, ) self.layer_scale_2 = self.add_weight( name="layer_scale_2", shape=self.dim, initializer=keras.initializers.constant(self.layer_scale_init_value), trainable=True, ) if getattr(self, "local_representation", None) is not None: with tf.name_scope(self.local_representation.name): self.local_representation.build(None) if getattr(self, "attn", None) is not None: with tf.name_scope(self.attn.name): self.attn.build(None) if getattr(self, "linear", None) is not None: with tf.name_scope(self.linear.name): self.linear.build(None) self.built = True def call(self, x: tf.Tensor, training: bool = False): x = self.local_representation(x, training=training) batch_size, height, width, channels = x.shape res = tf.reshape(x, [-1, height * width, channels]) res = self.attn(res) res = tf.reshape(res, [-1, height, width, channels]) if self.use_layer_scale: x = x + self.drop_path(self.layer_scale_1 * res, training=training) x = x + self.drop_path(self.layer_scale_2 * self.linear(x), training=training) else: x = x + self.drop_path(res, training=training) x = x + self.drop_path(self.linear(x), training=training) return x class TFSwiftFormerStage(keras.layers.Layer): """ A Swiftformer stage consisting of a series of `SwiftFormerConvEncoder` blocks and a final `SwiftFormerEncoderBlock`. 
Input: tensor in shape `[batch_size, channels, height, width]` Output: tensor in shape `[batch_size, channels, height, width]` """ def __init__(self, config: SwiftFormerConfig, index: int, **kwargs) -> None: super().__init__(**kwargs) layer_depths = config.depths dim = config.embed_dims[index] depth = layer_depths[index] self.blocks = [] for block_idx in range(depth): block_dpr = config.drop_path_rate * (block_idx + sum(layer_depths[:index])) / (sum(layer_depths) - 1) if depth - block_idx <= 1: self.blocks.append( TFSwiftFormerEncoderBlock(config, dim=dim, drop_path=block_dpr, name=f"blocks_._{block_idx}") ) else: self.blocks.append(TFSwiftFormerConvEncoder(config, dim=dim, name=f"blocks_._{block_idx}")) def call(self, input: tf.Tensor, training: bool = False) -> tf.Tensor: for i, block in enumerate(self.blocks): input = block(input, training=training) return input def build(self, input_shape=None): for layer in self.blocks: with tf.name_scope(layer.name): layer.build(None) class TFSwiftFormerEncoder(keras.layers.Layer): def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(**kwargs) self.config = config embed_dims = config.embed_dims downsamples = config.downsamples layer_depths = config.depths # Transformer model self.network = [] name_i = 0 for i in range(len(layer_depths)): stage = TFSwiftFormerStage(config, index=i, name=f"network_._{name_i}") self.network.append(stage) name_i += 1 if i >= len(layer_depths) - 1: break if downsamples[i] or embed_dims[i] != embed_dims[i + 1]: # downsampling between two stages self.network.append(TFSwiftFormerEmbeddings(config, index=i, name=f"network_._{name_i}")) name_i += 1 self.gradient_checkpointing = False def call( self, hidden_states: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFBaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict all_hidden_states = (hidden_states,) if output_hidden_states else None for i, block in enumerate(self.network): hidden_states = block(hidden_states, training=training) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = tf.transpose(hidden_states, perm=[0, 3, 1, 2]) if all_hidden_states: all_hidden_states = tuple(tf.transpose(s, perm=[0, 3, 1, 2]) for s in all_hidden_states) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) def build(self, input_shape=None): for layer in self.network: with tf.name_scope(layer.name): layer.build(None) class TFSwiftFormerPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = SwiftFormerConfig base_model_prefix = "swiftformer" main_input_name = "pixel_values" TFSWIFTFORMER_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. <Tip> TF 2.0 models accept two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated with the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` </Tip> Parameters: config ([`SwiftFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TFSWIFTFORMER_INPUTS_DOCSTRING = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. training (`bool`, *optional*, defaults to `False`): Whether or not to run the model in training mode. """ @keras_serializable class TFSwiftFormerMainLayer(keras.layers.Layer): config_class = SwiftFormerConfig def __init__(self, config: SwiftFormerConfig, **kwargs): super().__init__(**kwargs) self.config = config self.patch_embed = TFSwiftFormerPatchEmbedding(config, name="patch_embed") self.encoder = TFSwiftFormerEncoder(config, name="encoder") @unpack_inputs def call( self, pixel_values: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFBaseModelOutputWithNoAttention]: r""" """ output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TF 2.0 image layers can't use NCHW format when running on CPU. # We transpose to NHWC format and then transpose back after the full forward pass. 
if pixel_values is None: raise ValueError("You have to specify pixel_values") # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) embedding_output = self.patch_embed(pixel_values, training=training) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return tuple(v for v in encoder_outputs if v is not None) return TFBaseModelOutputWithNoAttention( last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return if getattr(self, "patch_embed", None) is not None: with tf.name_scope(self.patch_embed.name): self.patch_embed.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) self.built = True @add_start_docstrings( "The bare TFSwiftFormer Model transformer outputting raw hidden-states without any specific head on top.", TFSWIFTFORMER_START_DOCSTRING, ) class TFSwiftFormerModel(TFSwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.swiftformer = TFSwiftFormerMainLayer(config, name="swiftformer") @unpack_inputs @add_start_docstrings_to_model_forward(TFSWIFTFORMER_INPUTS_DOCSTRING) def call( self, pixel_values: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutputWithNoAttention, tuple[tf.Tensor]]: outputs = self.swiftformer( pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return if getattr(self, "swiftformer", None) is not None: with tf.name_scope(self.swiftformer.name): self.swiftformer.build(None) self.built = True @add_start_docstrings( """ TFSwiftFormer Model transformer with an image classification head on top (e.g. for ImageNet). 
""", TFSWIFTFORMER_START_DOCSTRING, ) class TFSwiftFormerForImageClassification(TFSwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig, **kwargs) -> None: super().__init__(config, **kwargs) self.num_labels = config.num_labels self.swiftformer = TFSwiftFormerMainLayer(config, name="swiftformer") # Classifier head self.norm = keras.layers.BatchNormalization(epsilon=config.batch_norm_eps, momentum=0.9, name="norm") self.head = ( keras.layers.Dense(self.num_labels, name="head") if self.num_labels > 0 else keras.layers.Identity(name="head") ) self.dist_head = ( keras.layers.Dense(self.num_labels, name="dist_head") if self.num_labels > 0 else keras.layers.Identity(name="dist_head") ) def hf_compute_loss(self, labels, logits): if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == tf.int64 or labels.dtype == tf.int32): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = keras.losses.MSE if self.num_labels == 1: loss = loss_fct(labels.squeeze(), logits.squeeze()) else: loss = loss_fct(labels, logits) elif self.config.problem_type == "single_label_classification": loss_fct = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE ) loss = loss_fct(labels, logits) elif self.config.problem_type == "multi_label_classification": loss_fct = keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=keras.losses.Reduction.NONE, ) loss = loss_fct(labels, logits) else: loss = None return loss @unpack_inputs @add_start_docstrings_to_model_forward(TFSWIFTFORMER_INPUTS_DOCSTRING) def call( self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[tuple, TFImageClassifierOutputWithNoAttention]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # run base model outputs = self.swiftformer( pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs.last_hidden_state if return_dict else outputs[0] sequence_output = tf.transpose(sequence_output, perm=[0, 2, 3, 1]) # run classification head sequence_output = self.norm(sequence_output, training=training) sequence_output = tf.transpose(sequence_output, perm=[0, 3, 1, 2]) _, num_channels, height, width = sequence_output.shape sequence_output = tf.reshape(sequence_output, [-1, num_channels, height * width]) sequence_output = tf.reduce_mean(sequence_output, axis=-1) cls_out = self.head(sequence_output) distillation_out = self.dist_head(sequence_output) logits = (cls_out + distillation_out) / 2 # calculate loss loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) def build(self, input_shape=None): if self.built: return if getattr(self, "swiftformer", None) is not None: with tf.name_scope(self.swiftformer.name): self.swiftformer.build(None) if getattr(self, "norm", None) is not None: with tf.name_scope(self.norm.name): self.norm.build((None, None, None, self.config.embed_dims[-1])) if getattr(self, "head", None) is not None: with tf.name_scope(self.head.name): self.head.build(self.config.embed_dims[-1]) if getattr(self, "dist_head", None) is not None: with tf.name_scope(self.dist_head.name): self.dist_head.build(self.config.embed_dims[-1]) self.built = True __all__ = ["TFSwiftFormerForImageClassification", "TFSwiftFormerModel", "TFSwiftFormerPreTrainedModel"]
transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py/0
{ "file_path": "transformers/src/transformers/models/swiftformer/modeling_tf_swiftformer.py", "repo_id": "transformers", "token_count": 15157 }
517
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Swinv2 Transformer model.""" import collections.abc import math import warnings from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging, torch_int from ...utils.backbone_utils import BackboneMixin from .configuration_swinv2 import Swinv2Config logger = logging.get_logger(__name__) # drop_path, Swinv2PatchEmbeddings, Swinv2PatchMerging and Swinv2DropPath are from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/swin_transformer_v2.py. @dataclass @auto_docstring( custom_intro=""" Swinv2 encoder's outputs, with potential hidden states and attentions. """ ) # Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->Swinv2 class Swinv2EncoderOutput(ModelOutput): r""" reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Swinv2 model's outputs that also contains a pooling of the last hidden states. """ ) # Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->Swinv2 class Swinv2ModelOutput(ModelOutput): r""" pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): Average pooling of the last layer hidden-state. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. 
""" last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Swinv2 masked image model outputs. """ ) # Copied from transformers.models.swin.modeling_swin.SwinMaskedImageModelingOutput with Swin->Swinv2 class Swinv2MaskedImageModelingOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided): Masked image modeling (MLM) loss. reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Reconstructed pixel values. reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None reconstruction: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @property def logits(self): warnings.warn( "logits attribute is deprecated and will be removed in version 5 of Transformers." " Please use the reconstruction attribute to retrieve the final output instead.", FutureWarning, ) return self.reconstruction @dataclass @auto_docstring( custom_intro=""" Swinv2 outputs for image classification. """ ) # Copied from transformers.models.swin.modeling_swin.SwinImageClassifierOutput with Swin->Swinv2 class Swinv2ImageClassifierOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None # Copied from transformers.models.swin.modeling_swin.window_partition def window_partition(input_feature, window_size): """ Partitions the given input into windows. 
""" batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.window_reverse def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swinv2 class Swinv2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" # Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->Swinv2 class Swinv2Embeddings(nn.Module): """ Construct the patch and position embeddings. Optionally, also the mask token. """ def __init__(self, config, use_mask_token=False): super().__init__() self.patch_embeddings = Swinv2PatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.patch_grid = self.patch_embeddings.grid_size self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.norm = nn.LayerNorm(config.embed_dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.patch_size = config.patch_size self.config = config # Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. 
This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embeddings.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embeddings class_pos_embed = self.position_embeddings[:, :1] patch_pos_embed = self.position_embeddings[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward( self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None, interpolate_pos_encoding: bool = False, ) -> tuple[torch.Tensor]: _, num_channels, height, width = pixel_values.shape embeddings, output_dimensions = self.patch_embeddings(pixel_values) embeddings = self.norm(embeddings) batch_size, seq_len, _ = embeddings.size() if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1.0 - mask) + mask_tokens * mask if self.position_embeddings is not None: if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions # Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings with Swin->Swinv2 class Swinv2PatchEmbeddings(nn.Module): """ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a Transformer. 
""" def __init__(self, config): super().__init__() image_size, patch_size = config.image_size, config.patch_size num_channels, hidden_size = config.num_channels, config.embed_dim image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_patches = num_patches self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) def maybe_pad(self, pixel_values, height, width): if width % self.patch_size[1] != 0: pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) pixel_values = nn.functional.pad(pixel_values, pad_values) if height % self.patch_size[0] != 0: pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) pixel_values = nn.functional.pad(pixel_values, pad_values) return pixel_values def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]: _, num_channels, height, width = pixel_values.shape # pad the input to be divisible by self.patch_size, if needed pixel_values = self.maybe_pad(pixel_values, height, width) embeddings = self.projection(pixel_values) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, output_dimensions class Swinv2PatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. 
""" def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(2 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input to be divisible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # [batch_size, height/2 * width/2, 4*num_channels] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*C] input_feature = self.reduction(input_feature) input_feature = self.norm(input_feature) return input_feature class Swinv2SelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.pretrained_window_size = pretrained_window_size self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.continuous_position_bias_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float() relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float() relative_coords_table = ( torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij")) .permute(1, 2, 0) .contiguous() .unsqueeze(0) ) # [1, 2*window_height - 1, 2*window_width - 1, 2] if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1 relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1 elif window_size > 1: relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = ( torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8) ) # set to same dtype as mlp weight 
relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype) self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # cosine attention attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize( key_layer, dim=-1 ).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp() attention_scores = attention_scores * logit_scale relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view( -1, self.num_attention_heads ) # [window_height*window_width,window_height*window_width,num_attention_heads] relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # [num_attention_heads,window_height*window_width,window_height*window_width] relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in Swinv2Model forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swinv2 class Swinv2SelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class Swinv2Attention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0): super().__init__() self.self = Swinv2SelfAttention( config=config, dim=dim, num_heads=num_heads, window_size=window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.output = Swinv2SelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->Swinv2 class Swinv2Intermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->Swinv2 class Swinv2Output(nn.Module): def __init__(self, config, 
dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class Swinv2Layer(nn.Module): def __init__( self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0 ): super().__init__() self.input_resolution = input_resolution window_size, shift_size = self._compute_window_shift( (config.window_size, config.window_size), (shift_size, shift_size) ) self.window_size = window_size[0] self.shift_size = shift_size[0] self.attention = Swinv2Attention( config=config, dim=dim, num_heads=num_heads, window_size=self.window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.drop_path = Swinv2DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.intermediate = Swinv2Intermediate(config, dim) self.output = Swinv2Output(config, dim) self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]: window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return window_size, shift_size def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for shifted window multihead self attention img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, torch.Tensor]: height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states # pad hidden_states to multiples of window size hidden_states = hidden_states.view(batch_size, height, width, channels) hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = 
torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = self.layernorm_before(attention_windows) hidden_states = shortcut + self.drop_path(hidden_states) layer_output = self.intermediate(hidden_states) layer_output = self.output(layer_output) layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output)) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class Swinv2Stage(GradientCheckpointingLayer): def __init__( self, config, dim, input_resolution, depth, num_heads, drop_path, downsample, pretrained_window_size=0 ): super().__init__() self.config = config self.dim = dim blocks = [] for i in range(depth): block = Swinv2Layer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, drop_path_rate=drop_path[i], shift_size=0 if (i % 2 == 0) else config.window_size // 2, pretrained_window_size=pretrained_window_size, ) blocks.append(block) self.blocks = nn.ModuleList(blocks) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class Swinv2Encoder(nn.Module): def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)): super().__init__() self.num_layers = 
len(config.depths) self.config = config if self.config.pretrained_window_sizes is not None: pretrained_window_sizes = config.pretrained_window_sizes dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")] layers = [] for i_layer in range(self.num_layers): stage = Swinv2Stage( config=config, dim=int(config.embed_dim * 2**i_layer), input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), depth=config.depths[i_layer], num_heads=config.num_heads[i_layer], drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=Swinv2PatchMerging if (i_layer < self.num_layers - 1) else None, pretrained_window_size=pretrained_window_sizes[i_layer], ) layers.append(stage) self.layers = nn.ModuleList(layers) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, Swinv2EncoderOutput]: all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange b (h w) c -> b c h w # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange b (h w) c -> b c h w reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states] if v is not None ) return Swinv2EncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, reshaped_hidden_states=all_reshaped_hidden_states, ) @auto_docstring class Swinv2PreTrainedModel(PreTrainedModel): config: Swinv2Config 
base_model_prefix = "swinv2" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["Swinv2Stage"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, Swinv2Embeddings): if module.mask_token is not None: module.mask_token.data.zero_() if module.position_embeddings is not None: module.position_embeddings.data.zero_() elif isinstance(module, Swinv2SelfAttention): module.logit_scale.data.fill_(math.log(10)) @auto_docstring # Copied from transformers.models.swin.modeling_swin.SwinModel with SWIN->SWINV2,Swin->Swinv2 class Swinv2Model(Swinv2PreTrainedModel): def __init__(self, config, add_pooling_layer=True, use_mask_token=False): r""" add_pooling_layer (`bool`, *optional*, defaults to `True`): Whether or not to apply pooling layer. use_mask_token (`bool`, *optional*, defaults to `False`): Whether or not to create and apply mask tokens in the embedding layer. """ super().__init__(config) self.config = config self.num_layers = len(config.depths) self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) self.embeddings = Swinv2Embeddings(config, use_mask_token=use_mask_token) self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid) self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps) self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, Swinv2ModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) embedding_output, input_dimensions = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding ) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = None if self.pooler is not None: pooled_output = self.pooler(sequence_output.transpose(1, 2)) pooled_output = torch.flatten(pooled_output, 1) if not return_dict: output = (sequence_output, pooled_output) + encoder_outputs[1:] return output return Swinv2ModelOutput( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, ) @auto_docstring( custom_intro=""" Swinv2 Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886). <Tip> Note that we provide a script to pre-train this model on custom data in our [examples directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). </Tip> """ ) # Copied from transformers.models.swin.modeling_swin.SwinForMaskedImageModeling with swin->swinv2, base-simmim-window6-192->tiny-patch4-window8-256,SWIN->SWINV2,Swin->Swinv2,192->256 class Swinv2ForMaskedImageModeling(Swinv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.swinv2 = Swinv2Model(config, add_pooling_layer=False, use_mask_token=True) num_features = int(config.embed_dim * 2 ** (config.num_layers - 1)) self.decoder = nn.Sequential( nn.Conv2d( in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1 ), nn.PixelShuffle(config.encoder_stride), ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, Swinv2MaskedImageModelingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
Examples: ```python >>> from transformers import AutoImageProcessor, Swinv2ForMaskedImageModeling >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> model = Swinv2ForMaskedImageModeling.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 256, 256] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swinv2( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) sequence_output = outputs[0] # Reshape to (batch_size, num_channels, height, width) sequence_output = sequence_output.transpose(1, 2) batch_size, num_channels, sequence_length = sequence_output.shape height = width = math.floor(sequence_length**0.5) sequence_output = sequence_output.reshape(batch_size, num_channels, height, width) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = ( bool_masked_pos.repeat_interleave(self.config.patch_size, 1) .repeat_interleave(self.config.patch_size, 2) .unsqueeze(1) .contiguous() ) reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none") masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels if not return_dict: output = (reconstructed_pixel_values,) + outputs[2:] return ((masked_im_loss,) + output) if masked_im_loss is not None else output return Swinv2MaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @auto_docstring( custom_intro=""" Swinv2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. <Tip> Note that it's possible to fine-tune SwinV2 on higher resolution images than the ones it has been trained on, by setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained position embeddings to the higher resolution. 
</Tip> """ ) # Copied from transformers.models.swin.modeling_swin.SwinForImageClassification with SWIN->SWINV2,Swin->Swinv2,swin->swinv2 class Swinv2ForImageClassification(Swinv2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.swinv2 = Swinv2Model(config) # Classifier head self.classifier = ( nn.Linear(self.swinv2.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[tuple, Swinv2ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.swinv2( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = outputs[1] logits = self.classifier(pooled_output) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, pooled_logits=logits, config=self.config) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return Swinv2ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, reshaped_hidden_states=outputs.reshaped_hidden_states, ) @auto_docstring( custom_intro=""" Swinv2 backbone, to be used with frameworks like DETR and MaskFormer. """ ) class Swinv2Backbone(Swinv2PreTrainedModel, BackboneMixin): def __init__(self, config): super().__init__(config) super()._init_backbone(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] self.embeddings = Swinv2Embeddings(config) self.encoder = Swinv2Encoder(config, self.embeddings.patch_grid) # initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings @auto_docstring def forward( self, pixel_values: Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") >>> model = AutoBackbone.from_pretrained( ... "microsoft/swinv2-tiny-patch4-window8-256", out_features=["stage1", "stage2", "stage3", "stage4"] ... 
)

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 768, 8, 8]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output, input_dimensions = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=None,
            output_attentions=output_attentions,
            output_hidden_states=True,
            output_hidden_states_before_downsampling=True,
            return_dict=return_dict,
        )

        hidden_states = outputs.reshaped_hidden_states if return_dict else outputs[-1]

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs[1],)
            if output_attentions:
                output += (outputs[2],)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )


__all__ = [
    "Swinv2ForImageClassification",
    "Swinv2ForMaskedImageModeling",
    "Swinv2Model",
    "Swinv2PreTrainedModel",
    "Swinv2Backbone",
]
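
# --- Editor's note (not part of the upstream file): the shifted-window attention above relies on
# `window_partition`/`window_reverse` being exact inverses once the feature map is padded to a
# multiple of the window size. The sketch below re-implements that pair under hypothetical names
# (`_window_partition`/`_window_reverse`) purely to illustrate the shape bookkeeping; it is a
# minimal standalone example, not the library's own helpers.
def _window_partition_demo():
    import torch

    def _window_partition(x, window_size):
        # [batch, height, width, channels] -> [num_windows * batch, window_size, window_size, channels]
        b, h, w, c = x.shape
        x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
        return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)

    def _window_reverse(windows, window_size, h, w):
        # exact inverse of _window_partition for the same padded height/width
        b = windows.shape[0] // ((h // window_size) * (w // window_size))
        x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
        return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)

    feature_map = torch.randn(2, 8, 8, 96)  # already padded so 8 is a multiple of window_size=4
    windows = _window_partition(feature_map, window_size=4)
    assert windows.shape == (2 * 4, 4, 4, 96)  # 4 windows per image, 2 images
    assert torch.equal(_window_reverse(windows, 4, 8, 8), feature_map)  # lossless round-trip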
transformers/src/transformers/models/swinv2/modeling_swinv2.py/0
{ "file_path": "transformers/src/transformers/models/swinv2/modeling_swinv2.py", "repo_id": "transformers", "token_count": 24944 }
518
# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model T5."""

import os
import re
import warnings
from shutil import copyfile
from typing import Optional

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}


# TODO(PVP) - this should be removed in Transformers v5
class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        extra_ids (`int`, *optional*, defaults to 100):
            Number of extra ids added to the vocabulary for use as sentinels. These tokens are accessible as
            "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. The sentinel tokens can be retrieved
            by calling the `get_sentinel_tokens` method, and their token ids by calling the `get_sentinel_token_ids`
            method.
        additional_special_tokens (`list[str]`, *optional*):
            Additional special tokens used by the tokenizer.
        add_prefix_space (`bool`, *optional*):
            Whether or not the tokenizer should automatically add a prefix space.
        from_slow (`bool`, *optional*, defaults to `False`):
            Whether or not the tokenizer should be converted from a slow one. If `add_prefix_space` is set, this
            will be set to `True`.
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = T5Tokenizer prefix_tokens: list[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, add_prefix_space=None, **kwargs, ): # Add extra_ids to the special token list if additional_special_tokens is not None: extra_tokens = [x for x in additional_special_tokens if "<extra_id_" in str(x)] if len(extra_tokens) < 1: additional_special_tokens += [f"<extra_id_{i}>" for i in range(extra_ids)] elif extra_ids > 0 and extra_ids != len(extra_tokens): raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) else: extra_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)] additional_special_tokens = extra_tokens if add_prefix_space is not None: logger.warning_once( "You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers" ) kwargs["from_slow"] = True super().__init__( vocab_file=vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, add_prefix_space=add_prefix_space, **kwargs, ) self.vocab_file = vocab_file self._extra_ids = extra_ids @staticmethod def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length): if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes: deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, ) return max_model_length def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." 
) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) logger.info(f"Copy vocab file to {out_vocab_file}") return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A sequence has the following format: - single sequence: `X </s>` - pair of sequences: `A </s> B </s>` Args: token_ids_0 (`list[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ token_ids_0 = token_ids_0 + [self.eos_token_id] if token_ids_1 is None: return self.prefix_tokens + token_ids_0 else: token_ids_1 = token_ids_1 + [self.eos_token_id] return self.prefix_tokens + token_ids_0 + token_ids_1 def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of zeros. """ eos = [self.eos_token_id] if token_ids_1 is None: return len(token_ids_0 + eos) * [0] return len(token_ids_0 + eos + token_ids_1 + eos) * [0] def get_sentinel_tokens(self): return list( set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)) ) def get_sentinel_token_ids(self): return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] __all__ = ["T5TokenizerFast"]
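
# --- Editor's note (not part of the upstream file): a hedged usage sketch of the class above. The
# checkpoint name "google-t5/t5-small" is an assumption (any T5 checkpoint that ships a
# tokenizer.json should work), and the call needs network or cache access, so everything is kept
# behind a __main__ guard rather than running at import time.
if __name__ == "__main__":
    tokenizer = T5TokenizerFast.from_pretrained("google-t5/t5-small")
    # build_inputs_with_special_tokens appends </s>, so a single encoded sequence ends with eos
    ids = tokenizer("translate English to German: hello")["input_ids"]
    assert ids[-1] == tokenizer.eos_token_id
    # the default 100 sentinel tokens (<extra_id_0> ... <extra_id_99>) and their ids
    sentinels = tokenizer.get_sentinel_tokens()
    sentinel_ids = tokenizer.get_sentinel_token_ids()
    assert len(sentinels) == len(sentinel_ids) == 100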
transformers/src/transformers/models/t5/tokenization_t5_fast.py/0
{ "file_path": "transformers/src/transformers/models/t5/tokenization_t5_fast.py", "repo_id": "transformers", "token_count": 4244 }
519
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert TimeSformer checkpoints from the original repository: https://github.com/MCG-NJU/TimeSformer""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import TimesformerConfig, TimesformerForVideoClassification, VideoMAEImageProcessor def get_timesformer_config(model_name): config = TimesformerConfig() if "large" in model_name: config.num_frames = 96 if "hr" in model_name: config.num_frames = 16 config.image_size = 448 repo_id = "huggingface/label-files" if "k400" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" elif "k600" in model_name: config.num_labels = 600 filename = "kinetics600-id2label.json" elif "ssv2" in model_name: config.num_labels = 174 filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'k400', 'k600' or 'ssv2'.") id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(name): if "encoder." 
in name: name = name.replace("encoder.", "") if "cls_token" in name: name = name.replace("cls_token", "timesformer.embeddings.cls_token") if "pos_embed" in name: name = name.replace("pos_embed", "timesformer.embeddings.position_embeddings") if "time_embed" in name: name = name.replace("time_embed", "timesformer.embeddings.time_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "timesformer.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "timesformer.embeddings.norm") if "blocks" in name: name = name.replace("blocks", "timesformer.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "bias" not in name and "temporal" not in name: name = name.replace("attn", "attention.self") if "attn" in name and "temporal" not in name: name = name.replace("attn", "attention.attention") if "temporal_norm1" in name: name = name.replace("temporal_norm1", "temporal_layernorm") if "temporal_attn.proj" in name: name = name.replace("temporal_attn", "temporal_attention.output.dense") if "temporal_fc" in name: name = name.replace("temporal_fc", "temporal_dense") if "norm1" in name and "temporal" not in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "norm.weight" in name and "fc" not in name and "temporal" not in name: name = name.replace("norm.weight", "timesformer.layernorm.weight") if "norm.bias" in name and "fc" not in name and "temporal" not in name: name = name.replace("norm.bias", "timesformer.layernorm.bias") if "head" in name: name = name.replace("head", "classifier") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy(): val = orig_state_dict.pop(key) if key.startswith("model."): key = key.replace("model.", "") if "qkv" in key: key_split = key.split(".") layer_num = int(key_split[1]) prefix = "timesformer.encoder.layer." if "temporal" in key: postfix = ".temporal_attention.attention.qkv." else: postfix = ".attention.attention.qkv." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}{postfix}weight"] = val else: orig_state_dict[f"{prefix}{layer_num}{postfix}bias"] = val else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) def convert_timesformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): config = get_timesformer_config(model_name) model = TimesformerForVideoClassification(config) # download original checkpoint, hosted on Google Drive output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) files = torch.load(output, map_location="cpu", weights_only=True) if "model" in files: state_dict = files["model"] elif "module" in files: state_dict = files["module"] else: state_dict = files["model_state"] new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() # verify model on basic input image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) video = prepare_video() inputs = image_processor(video[:8], return_tensors="pt") outputs = model(**inputs) logits = outputs.logits model_names = [ # Kinetics-400 checkpoints (hr = high resolution input of 448px instead of 224px) "timesformer-base-finetuned-k400", "timesformer-large-finetuned-k400", "timesformer-hr-finetuned-k400", # Kinetics-600 checkpoints (hr = high resolution input of 448px instead of 224px) "timesformer-base-finetuned-k600", "timesformer-large-finetuned-k600", "timesformer-hr-finetuned-k600", # Something-Something-v2 checkpoints (hr = high resolution input of 448px instead of 224px) "timesformer-base-finetuned-ssv2", "timesformer-large-finetuned-ssv2", "timesformer-hr-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "timesformer-base-finetuned-k400": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]) elif model_name == "timesformer-base-finetuned-k600": expected_shape = torch.Size([1, 600]) expected_slice = torch.tensor([-0.7267, -0.7466, 3.2404]) elif model_name == "timesformer-base-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-0.9059, 0.6433, -3.1457]) elif model_name == "timesformer-large-finetuned-k400": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0, 0, 0]) elif model_name == "timesformer-large-finetuned-k600": expected_shape = torch.Size([1, 600]) expected_slice = torch.tensor([0, 0, 0]) elif model_name == "timesformer-large-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0, 0, 0]) elif model_name == "timesformer-hr-finetuned-k400": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([-0.9617, -3.7311, -3.7708]) elif model_name == "timesformer-hr-finetuned-k600": expected_shape = torch.Size([1, 600]) expected_slice = torch.tensor([2.5273, 0.7127, 1.8848]) elif model_name == "timesformer-hr-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-3.6756, -0.7513, 0.7180]) else: raise ValueError(f"Model name not supported. 
Should be one of {model_names}") # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) print("Logits ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(f"fcakyon/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=17yvuYp9L4mn-HpIcK5Zo6K3UoOy1kA5l&export=download", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="timesformer-base-finetuned-k400", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_timesformer_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub )
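
# --- Editor's note (not part of the upstream script): a few illustrative checks of how
# `rename_key` maps original TimeSformer checkpoint keys onto the HF naming scheme used above.
# The sample keys are hypothetical but follow the patterns the function handles; they are wrapped
# in a helper so nothing runs on import or alongside the CLI entry point.
def _rename_key_examples():
    # attention projection inside a transformer block
    assert rename_key("blocks.0.attn.proj.weight") == "timesformer.encoder.layer.0.attention.output.dense.weight"
    # final layernorm of the backbone
    assert rename_key("norm.weight") == "timesformer.layernorm.weight"
    # classification head
    assert rename_key("head.bias") == "classifier.bias"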
transformers/src/transformers/models/timesformer/convert_timesformer_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/timesformer/convert_timesformer_to_pytorch.py", "repo_id": "transformers", "token_count": 4209 }
520
# coding=utf-8
# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for TVP."""

from collections.abc import Iterable
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    PaddingMode,
    flip_channel_order,
    pad,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_valid_image,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# Copied from transformers.models.vivit.image_processing_vivit.make_batched
def make_batched(videos) -> list[list[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


def get_resize_output_image_size(
    input_image: np.ndarray,
    max_size: int = 448,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
    height, width = get_image_size(input_image, input_data_format)
    if height >= width:
        ratio = width * 1.0 / height
        new_height = max_size
        new_width = new_height * ratio
    else:
        ratio = height * 1.0 / width
        new_width = max_size
        new_height = new_width * ratio

    size = (int(new_height), int(new_width))

    return size


class TvpImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Tvp image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"longest_edge": 448}`):
            Size of the output image after resizing. The longest edge of the image will be resized to
            `size["longest_edge"]` while maintaining the aspect ratio of the original image. Can be overridden by
            `size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image to the specified `crop_size`. Can be overridden by the `do_center_crop`
            parameter in the `preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
            Size of the image after applying the center crop. Can be overridden by the `crop_size` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
            in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method.
        pad_size (`dict[str, int]`, *optional*, defaults to `{"height": 448, "width": 448}`):
            Size of the image after applying the padding. Can be overridden by the `pad_size` parameter in the
            `preprocess` method.
        constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to 0):
            The fill value to use when padding the image.
        pad_mode (`PaddingMode`, *optional*, defaults to `PaddingMode.CONSTANT`):
            The padding mode to use.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        do_flip_channel_order (`bool`, *optional*, defaults to `True`):
            Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
            parameter in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
""" model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: Optional[dict[str, int]] = None, constant_values: Union[float, Iterable[float]] = 0, pad_mode: PaddingMode = PaddingMode.CONSTANT, do_normalize: bool = True, do_flip_channel_order: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"longest_edge": 448} crop_size = crop_size if crop_size is not None else {"height": 448, "width": 448} pad_size = pad_size if pad_size is not None else {"height": 448, "width": 448} self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self.constant_values = constant_values self.pad_mode = pad_mode self.do_normalize = do_normalize self.do_flip_channel_order = do_flip_channel_order self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Size of the output image. If `size` is of the form `{"height": h, "width": w}`, the output image will have the size `(h, w)`. If `size` is of the form `{"longest_edge": s}`, the output image will have its longest edge of length `s` while keeping the aspect ratio of the original image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=False) if "height" in size and "width" in size: output_size = (size["height"], size["width"]) elif "longest_edge" in size: output_size = get_resize_output_image_size(image, size["longest_edge"], input_data_format) else: raise ValueError(f"Size must have 'height' and 'width' or 'longest_edge' as keys. Got {size.keys()}") return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def pad_image( self, image: np.ndarray, pad_size: Optional[dict[str, int]] = None, constant_values: Union[float, Iterable[float]] = 0, pad_mode: PaddingMode = PaddingMode.CONSTANT, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Pad an image with zeros to the given size. Args: image (`np.ndarray`): Image to pad. pad_size (`dict[str, int]`) Size of the output image with pad. 
            constant_values (`Union[float, Iterable[float]]`):
                The fill value to use when padding the image.
            pad_mode (`PaddingMode`):
                The padding mode to use, defaults to `PaddingMode.CONSTANT`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        height, width = get_image_size(image, channel_dim=input_data_format)
        max_height = pad_size.get("height", height)
        max_width = pad_size.get("width", width)

        pad_right, pad_bottom = max_width - width, max_height - height
        if pad_right < 0 or pad_bottom < 0:
            raise ValueError("The padding size must be greater than or equal to the image size")

        padding = ((0, pad_bottom), (0, pad_right))
        padded_image = pad(
            image,
            padding,
            mode=pad_mode,
            constant_values=constant_values,
            data_format=data_format,
            input_data_format=input_data_format,
        )
        return padded_image

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: bool = True,
        pad_size: Optional[dict[str, int]] = None,
        constant_values: Optional[Union[float, Iterable[float]]] = None,
        pad_mode: PaddingMode = None,
        do_normalize: Optional[bool] = None,
        do_flip_channel_order: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_pad=do_pad,
            size_divisibility=pad_size,  # here the pad() method simply requires the pad_size argument.
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(
                image=image.astype(np.float32), mean=image_mean, std=image_std, input_data_format=input_data_format
            )

        if do_pad:
            image = self.pad_image(
                image=image,
                pad_size=pad_size,
                constant_values=constant_values,
                pad_mode=pad_mode,
                input_data_format=input_data_format,
            )

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            image = flip_channel_order(image=image, input_data_format=input_data_format)

        image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)

        return image

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        videos: Union[ImageInput, list[ImageInput], list[list[ImageInput]]],
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[dict[str, int]] = None,
        constant_values: Optional[Union[float, Iterable[float]]] = None,
        pad_mode: PaddingMode = None,
        do_normalize: Optional[bool] = None,
        do_flip_channel_order: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> BatchFeature:
        """
        Preprocess a video or batch of videos.

        Args:
            videos (`ImageInput` or `list[ImageInput]` or `list[list[ImageInput]]`):
                Frames to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after applying resize.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`.
                Only has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after applying the center crop.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values to the `[0, 1]` range.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image.
            pad_size (`dict[str, int]`, *optional*, defaults to `self.pad_size`):
                Size of the image after applying the padding.
            constant_values (`Union[float, Iterable[float]]`, *optional*, defaults to `self.constant_values`):
                The fill value to use when padding the image.
            pad_mode (`PaddingMode`, *optional*, defaults to `self.pad_mode`):
                The padding mode to use.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`): Whether to flip the channel order of the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the inferred channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_pad = do_pad if do_pad is not None else self.do_pad pad_size = pad_size if pad_size is not None else self.pad_size constant_values = constant_values if constant_values is not None else self.constant_values pad_mode = pad_mode if pad_mode else self.pad_mode do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_flip_channel_order = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") if not valid_images(videos): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) videos = make_batched(videos) videos = [ np.array( [ self._preprocess_image( image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_pad=do_pad, pad_size=pad_size, constant_values=constant_values, pad_mode=pad_mode, do_normalize=do_normalize, do_flip_channel_order=do_flip_channel_order, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format, ) for img in video ] ) for video in videos ] data = {"pixel_values": videos} return BatchFeature(data=data, tensor_type=return_tensors) __all__ = ["TvpImageProcessor"]
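Taken together, the pipeline above resizes the longest edge to 448, center crops, rescales, normalizes, pads to 448x448, and flips channels to BGR. A minimal usage sketch, assuming a random dummy clip stands in for real video frames (the shapes shown are what these defaults produce):

```python
import numpy as np
from transformers import TvpImageProcessor

# A dummy 8-frame clip of channels-first uint8 frames (illustrative only).
video = [np.random.randint(0, 256, (3, 360, 640), dtype=np.uint8) for _ in range(8)]

processor = TvpImageProcessor()  # resize longest edge to 448, pad to 448x448, flip RGB -> BGR
inputs = processor(video, return_tensors="pt")

print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 8, 3, 448, 448])
```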
transformers/src/transformers/models/tvp/image_processing_tvp.py
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UperNet model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`UperNetForSemanticSegmentation`]. It is used to
    instantiate an UperNet model according to the specified arguments, defining the model architecture. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the UperNet
    [openmmlab/upernet-convnext-tiny](https://huggingface.co/openmmlab/upernet-convnext-tiny) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If
            `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the
            backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        hidden_size (`int`, *optional*, defaults to 512):
            The number of hidden units in the convolutional layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_in_channels (`int`, *optional*):
            Number of input channels for the auxiliary head. If `None`, it is derived from the backbone.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
loss_ignore_index (`int`, *optional*, defaults to 255): The index that is ignored by the loss function. Examples: ```python >>> from transformers import UperNetConfig, UperNetForSemanticSegmentation >>> # Initializing a configuration >>> configuration = UperNetConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = UperNetForSemanticSegmentation(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "upernet" def __init__( self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=None, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ): super().__init__(**kwargs) if backbone_config is None and backbone is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.get("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.backbone_kwargs = backbone_kwargs self.hidden_size = hidden_size self.initializer_range = initializer_range self.pool_scales = pool_scales self.use_auxiliary_head = use_auxiliary_head self.auxiliary_loss_weight = auxiliary_loss_weight self.auxiliary_in_channels = auxiliary_in_channels self.auxiliary_channels = auxiliary_channels self.auxiliary_num_convs = auxiliary_num_convs self.auxiliary_concat_input = auxiliary_concat_input self.loss_ignore_index = loss_ignore_index @property def sub_configs(self): return ( {"backbone_config": type(self.backbone_config)} if getattr(self, "backbone_config", None) is not None else {} ) __all__ = ["UperNetConfig"]
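Since any backbone-capable config can be plugged in via `backbone_config`, here is a minimal sketch pairing the config with a ConvNeXt backbone instead of the default ResNet (the stage names and `num_labels=19` are illustrative assumptions, not values from this file):

```python
from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation

# Use a ConvNeXt backbone; `out_features` selects which backbone stages
# feed the UperNet decode head.
backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])

config = UperNetConfig(backbone_config=backbone_config, num_labels=19)
model = UperNetForSemanticSegmentation(config)  # randomly initialized weights
```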
transformers/src/transformers/models/upernet/configuration_upernet.py
# coding=utf-8 # Copyright 2022 Multimedia Computing Group, Nanjing University and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VideoMAE (masked autoencoder) model.""" import collections.abc from copy import deepcopy from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, auto_docstring, logging, ) from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .configuration_videomae import VideoMAEConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Class for VideoMAEDecoder's outputs, with potential hidden states and attentions. """ ) class VideoMAEDecoderOutput(ModelOutput): r""" logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. """ logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @dataclass @auto_docstring( custom_intro=""" Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions. """ ) class VideoMAEForPreTrainingOutput(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`): Pixel reconstruction loss. logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): Pixel reconstruction logits. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None # sin-cos position encoding # https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 def get_sinusoid_encoding_table(n_position, d_hid): """Sinusoid position encoding table""" # TODO: make it with torch instead of numpy def get_position_angle_vec(position): return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 return torch.FloatTensor(sinusoid_table).unsqueeze(0) class VideoMAEEmbeddings(nn.Module): """ Construct the patch and position embeddings. 
""" def __init__(self, config): super().__init__() self.patch_embeddings = VideoMAEPatchEmbeddings(config) self.num_patches = self.patch_embeddings.num_patches # fixed sin-cos embedding self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size) self.config = config def forward(self, pixel_values, bool_masked_pos): # create patch embeddings embeddings = self.patch_embeddings(pixel_values) # add position embeddings embeddings = embeddings + self.position_embeddings.detach().type_as(embeddings).to( device=embeddings.device, copy=True ) # only keep visible patches # ~bool_masked_pos means visible if bool_masked_pos is not None: batch_size, _, num_channels = embeddings.shape embeddings = embeddings[~bool_masked_pos] embeddings = embeddings.reshape(batch_size, -1, num_channels) return embeddings class VideoMAEPatchEmbeddings(nn.Module): """ Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder. The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width // patch_size). """ def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size num_channels = config.num_channels hidden_size = config.hidden_size num_frames = config.num_frames tubelet_size = config.tubelet_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) self.image_size = image_size self.patch_size = patch_size self.tubelet_size = int(tubelet_size) num_patches = ( (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size) ) self.num_channels = num_channels self.num_patches = num_patches self.projection = nn.Conv3d( in_channels=num_channels, out_channels=hidden_size, kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), stride=(self.tubelet_size, patch_size[0], patch_size[1]), ) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) # permute to (batch_size, num_channels, num_frames, height, width) pixel_values = pixel_values.permute(0, 2, 1, 3, 4) embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) return embeddings # Copied from transformers.models.vit.modeling_vit.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling # Normalize the attention scores to probabilities. 
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) # Mask heads if we want to if attention_mask is not None: attn_weights = attn_weights * attention_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class VideoMAESelfAttention(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) if config.qkv_bias: self.q_bias = nn.Parameter(torch.zeros(self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(self.all_head_size)) else: self.q_bias = None self.v_bias = None def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias) values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias) queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias) key_layer = keys.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = values.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) query_layer = queries.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' 
) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE class VideoMAESelfOutput(nn.Module): """ The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the layernorm applied before each block. """ def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VideoMAE class VideoMAEAttention(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.attention = VideoMAESelfAttention(config) self.output = VideoMAESelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->VideoMAE class VideoMAEIntermediate(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->VideoMAE class VideoMAEOutput(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.dense = 
nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE,VIT->VIDEOMAE class VideoMAELayer(GradientCheckpointingLayer): """This corresponds to the Block class in the timm implementation.""" def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VideoMAEAttention(config) self.intermediate = VideoMAEIntermediate(config) self.output = VideoMAEOutput(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in VideoMAE, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states # in VideoMAE, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) # second residual connection is done here layer_output = self.output(layer_output, hidden_states) outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE class VideoMAEEncoder(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class VideoMAEPreTrainedModel(PreTrainedModel): config: VideoMAEConfig base_model_prefix = "videomae" main_input_name = "pixel_values" supports_gradient_checkpointing = True _supports_sdpa = True _supports_flash_attn = True 
_supports_flex_attn = True _supports_attention_backend = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv3d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class VideoMAEModel(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.embeddings = VideoMAEEmbeddings(config) self.encoder = VideoMAEEncoder(config) if config.use_mean_pooling: self.layernorm = None else: self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`. Examples: ```python >>> import av >>> import numpy as np >>> from transformers import AutoImageProcessor, VideoMAEModel >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`list[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... 
return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 16 frames >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base") >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base") >>> # prepare video for the model >>> inputs = image_processor(list(video), return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 1568, 768] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values, bool_masked_pos) encoder_outputs = self.encoder( embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if self.layernorm is not None: sequence_output = self.layernorm(sequence_output) if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class VideoMAEDecoder(nn.Module): def __init__(self, config, num_patches): super().__init__() decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2 decoder_config = deepcopy(config) decoder_config.hidden_size = config.decoder_hidden_size decoder_config.num_hidden_layers = config.decoder_num_hidden_layers decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size self.decoder_layers = nn.ModuleList( [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)] ) self.norm = nn.LayerNorm(config.decoder_hidden_size) self.head = ( nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity() ) self.gradient_checkpointing = False self.config = config def forward( self, hidden_states, return_token_num, output_attentions=False, output_hidden_states=False, return_dict=True, ): # apply Transformer layers (blocks) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.decoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if 
output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if return_token_num > 0: hidden_states = hidden_states[:, -return_token_num:] # predictor projection hidden_states = self.norm(hidden_states) logits = self.head(hidden_states) if not return_dict: return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None) return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) @auto_docstring( custom_intro=""" The VideoMAE Model transformer with the decoder on top for self-supervised pre-training. """ ) class VideoMAEForPreTraining(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config self.videomae = VideoMAEModel(config) self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) self.position_embeddings = get_sinusoid_encoding_table( self.videomae.embeddings.num_patches, config.decoder_hidden_size ) self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, bool_masked_pos: torch.BoolTensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, VideoMAEForPreTrainingOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`. Examples: ```python >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining >>> import numpy as np >>> import torch >>> num_frames = 16 >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224))) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base") >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base") >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2 >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss = outputs.loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.videomae( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.encoder_to_decoder( sequence_output ) # [batch_size, num_visible_patches, decoder_hidden_size] batch_size, seq_len, num_channels = sequence_output.shape # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly. 
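        # Illustration of the gather below, under hypothetical numbers: with the
        # default config (1568 patches) and 1411 masked patches per video,
        # `pos_emb_visible` is (batch_size, 157, decoder_hidden_size) and
        # `pos_emb_mask` is (batch_size, 1411, decoder_hidden_size). Visible
        # tokens come first in `x_full`, so the decoder only needs to return
        # the last `pos_emb_mask.shape[1]` tokens as predictions.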
        if bool_masked_pos is None:
            raise ValueError("A boolean mask (bool_masked_pos) must be provided for pre-training.")
        expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
        expanded_position_embeddings = expanded_position_embeddings.detach().to(device=pixel_values.device, copy=True)
        pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
        pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)

        # [batch_size, num_patches, decoder_hidden_size]
        x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)

        # [batch_size, num_masked_patches, num_channels * patch_size * patch_size]
        decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
        logits = decoder_outputs.logits

        loss = None
        with torch.no_grad():
            # calculate the labels to be predicted
            if self.config.num_channels != 3:
                # Can't unnormalize with default means/stds
                frames = pixel_values
            else:
                # first, unnormalize the frames
                device = pixel_values.device
                dtype = pixel_values.dtype
                mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
                std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
                frames = pixel_values * std + mean  # in [0, 1]

            batch_size, time, num_channels, height, width = frames.shape
            tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
            if self.config.norm_pix_loss:
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate:
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate:
                frames = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size,
                    num_channels,
                )
                # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08.
                frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
                    frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
                )
                # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C)
                videos_patch = frames_norm.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )
            else:
                if self.config.num_channels != 3:
                    raise ValueError(
                        "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
) # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size) frames = frames.view( batch_size, time // tubelet_size, tubelet_size, num_channels, height // patch_size, patch_size, width // patch_size, patch_size, ) # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C) frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() # step 3: concatenate videos_patch = frames.view( batch_size, time // tubelet_size * height // patch_size * width // patch_size, tubelet_size * patch_size * patch_size * num_channels, ) batch_size, _, num_channels = videos_patch.shape labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels) loss_fct = MSELoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return VideoMAEForPreTrainingOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden states of all tokens) e.g. for ImageNet. """ ) class VideoMAEForVideoClassification(VideoMAEPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.videomae = VideoMAEModel(config) # Classifier head self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Examples: ```python >>> import av >>> import torch >>> import numpy as np >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification >>> from huggingface_hub import hf_hub_download >>> np.random.seed(0) >>> def read_video_pyav(container, indices): ... ''' ... Decode the video with PyAV decoder. ... Args: ... container (`av.container.input.InputContainer`): PyAV container. ... indices (`list[int]`): List of frame indices to decode. ... Returns: ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). ... ''' ... frames = [] ... container.seek(0) ... start_index = indices[0] ... end_index = indices[-1] ... for i, frame in enumerate(container.decode(video=0)): ... if i > end_index: ... break ... if i >= start_index and i in indices: ... frames.append(frame) ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... ''' ... Sample a given number of frame indices from the video. ... Args: ... clip_len (`int`): Total number of frames to sample. ... frame_sample_rate (`int`): Sample every n-th frame. ... 
seg_len (`int`): Maximum allowed index of sample's last frame. ... Returns: ... indices (`list[int]`): List of sampled frame indices ... ''' ... converted_len = int(clip_len * frame_sample_rate) ... end_idx = np.random.randint(converted_len, seg_len) ... start_idx = end_idx - converted_len ... indices = np.linspace(start_idx, end_idx, num=clip_len) ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) ... return indices >>> # video clip consists of 300 frames (10 seconds at 30 FPS) >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) >>> container = av.open(file_path) >>> # sample 16 frames >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames) >>> video = read_video_pyav(container, indices) >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> inputs = image_processor(list(video), return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) ... logits = outputs.logits >>> # model predicts one of the 400 Kinetics-400 classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) eating spaghetti ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.videomae( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] if self.fc_norm is not None: sequence_output = self.fc_norm(sequence_output.mean(1)) else: sequence_output = sequence_output[:, 0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["VideoMAEForPreTraining", "VideoMAEModel", "VideoMAEPreTrainedModel", "VideoMAEForVideoClassification"]
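The pre-training forward pass above requires every video in a batch to mask the same number of patches. A minimal sketch of building such a `bool_masked_pos` with a fixed ratio (simple random masking here as an illustrative stand-in; the VideoMAE paper uses tube masking, and the ~90% ratio is taken from it):

```python
import torch
from transformers import VideoMAEConfig, VideoMAEForPreTraining

config = VideoMAEConfig()
model = VideoMAEForPreTraining(config)  # randomly initialized weights

num_patches_per_frame = (config.image_size // config.patch_size) ** 2
seq_length = (config.num_frames // config.tubelet_size) * num_patches_per_frame
num_masked = int(0.9 * seq_length)  # ~90% masking ratio, as in the paper

# Mask exactly `num_masked` random patches per video so the reshape in
# VideoMAEEmbeddings.forward is valid for the whole batch.
batch_size = 2
scores = torch.rand(batch_size, seq_length)
bool_masked_pos = torch.zeros(batch_size, seq_length, dtype=torch.bool)
bool_masked_pos.scatter_(1, scores.topk(num_masked, dim=-1).indices, True)

pixel_values = torch.randn(batch_size, config.num_frames, 3, config.image_size, config.image_size)
outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
print(outputs.loss, outputs.logits.shape)  # logits: (batch_size, num_masked, tubelet_size * patch_size**2 * 3)
```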
transformers/src/transformers/models/videomae/modeling_videomae.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT and non-distilled DeiT checkpoints from the timm library.""" import argparse from pathlib import Path import requests import timm import torch from PIL import Image from timm.data import ImageNetInfo, infer_imagenet_subset from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our ViT structure. """ # define default ViT configuration config = ViTConfig() base_model = False # load original model from timm timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() # detect unsupported ViT models in transformers # fc_norm is present if not isinstance(getattr(timm_model, "fc_norm", None), torch.nn.Identity): raise ValueError(f"{vit_name} is not supported in transformers because of the presence of fc_norm.") # use of global average pooling in combination (or without) class token if getattr(timm_model, "global_pool", None) == "avg": raise ValueError(f"{vit_name} is not supported in transformers because of use of global average pooling.") # CLIP style vit with norm_pre layer present if "clip" in vit_name and not isinstance(getattr(timm_model, "norm_pre", None), torch.nn.Identity): raise ValueError( f"{vit_name} is not supported in transformers because it's a CLIP style ViT with norm_pre layer." ) # SigLIP style vit with attn_pool layer present if "siglip" in vit_name and getattr(timm_model, "global_pool", None) == "map": raise ValueError( f"{vit_name} is not supported in transformers because it's a SigLIP style ViT with attn_pool." 
) # use of layer scale in ViT model blocks if not isinstance(getattr(timm_model.blocks[0], "ls1", None), torch.nn.Identity) or not isinstance( getattr(timm_model.blocks[0], "ls2", None), torch.nn.Identity ): raise ValueError(f"{vit_name} is not supported in transformers because it uses a layer scale in its blocks.") # Hybrid ResNet-ViTs if not isinstance(timm_model.patch_embed, timm.layers.PatchEmbed): raise ValueError(f"{vit_name} is not supported in transformers because it is a hybrid ResNet-ViT.") # get patch size and image size from the patch embedding submodule config.patch_size = timm_model.patch_embed.patch_size[0] config.image_size = timm_model.patch_embed.img_size[0] # retrieve architecture-specific parameters from the timm model config.hidden_size = timm_model.embed_dim config.intermediate_size = timm_model.blocks[0].mlp.fc1.out_features config.num_hidden_layers = len(timm_model.blocks) config.num_attention_heads = timm_model.blocks[0].attn.num_heads # check whether the model has a classification head or not if timm_model.num_classes != 0: config.num_labels = timm_model.num_classes # infer ImageNet subset from timm model imagenet_subset = infer_imagenet_subset(timm_model) dataset_info = ImageNetInfo(imagenet_subset) config.id2label = {i: dataset_info.index_to_label_name(i) for i in range(dataset_info.num_classes())} config.label2id = {v: k for k, v in config.id2label.items()} else: print(f"{vit_name} is going to be converted as a feature extractor only.") base_model = True # load state_dict of original model state_dict = timm_model.state_dict() # remove and rename some keys in the state dict if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) # load HuggingFace model if base_model: model = ViTModel(config, add_pooling_layer=False).eval() else: model = ViTForImageClassification(config).eval() model.load_state_dict(state_dict) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: image_processor = DeiTImageProcessor(size=config.image_size) else: image_processor = ViTImageProcessor(size=config.image_size) encoding = image_processor(images=prepare_img(), return_tensors="pt") pixel_values = encoding["pixel_values"] outputs = model(pixel_values) if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) assert timm_pooled_output.shape == outputs.last_hidden_state.shape assert torch.allclose(timm_pooled_output, outputs.last_hidden_state, atol=1e-1) else: timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) args = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
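`read_in_q_k_v` relies on timm storing the query, key and value projections as one fused `qkv` matrix, stacked in that order along the output dimension. A minimal sketch verifying that slicing convention (the sizes are illustrative, not from any checkpoint):

```python
import torch

hidden_size = 8
x = torch.randn(1, 4, hidden_size)

# A fused qkv projection as in timm: weight of shape (3 * hidden, hidden).
qkv = torch.nn.Linear(hidden_size, 3 * hidden_size)
q_fused, k_fused, v_fused = qkv(x).split(hidden_size, dim=-1)

# Rebuild separate projections from slices of the fused weight/bias,
# mirroring the index arithmetic in `read_in_q_k_v`.
q = torch.nn.functional.linear(x, qkv.weight[:hidden_size], qkv.bias[:hidden_size])
k = torch.nn.functional.linear(x, qkv.weight[hidden_size : 2 * hidden_size], qkv.bias[hidden_size : 2 * hidden_size])
v = torch.nn.functional.linear(x, qkv.weight[-hidden_size:], qkv.bias[-hidden_size:])

assert torch.allclose(q, q_fused) and torch.allclose(k, k_fused) and torch.allclose(v, v_fused)
```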
transformers/src/transformers/models/vit/convert_vit_timm_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/vit/convert_vit_timm_to_pytorch.py", "repo_id": "transformers", "token_count": 4416 }
524
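The conversion script above drives all of its key remapping through a flat `(src, dest)` table and the small `rename_key` helper, which pops the timm key and reinserts the tensor under the Hugging Face name; as long as source keys are unique, the order of the table does not matter. A minimal sketch with a hypothetical two-entry table and placeholder tensors:

```python
import torch

def rename_key(dct, old, new):
    dct[new] = dct.pop(old)

# hypothetical miniature state dict and rename table
state_dict = {
    "cls_token": torch.zeros(1, 1, 8),
    "patch_embed.proj.weight": torch.zeros(8, 3, 16, 16),
}
rename_keys = [
    ("cls_token", "embeddings.cls_token"),
    ("patch_embed.proj.weight", "embeddings.patch_embeddings.projection.weight"),
]

for src, dest in rename_keys:
    rename_key(state_dict, src, dest)

# only the Hugging Face-style names remain
assert sorted(state_dict) == [
    "embeddings.cls_token",
    "embeddings.patch_embeddings.projection.weight",
]
```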
# coding=utf-8 # Copyright 2024 University of Sydney and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VitPose backbone model. This code is the same as the original Vision Transformer (ViT) with 2 modifications: - use of padding=2 in the patch embedding layer - addition of a mixture-of-experts MLP layer """ import collections.abc from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging from ...utils.backbone_utils import BackboneMixin from .configuration_vitpose_backbone import VitPoseBackboneConfig logger = logging.get_logger(__name__) class VitPoseBackbonePatchEmbeddings(nn.Module): """Image to Patch Embedding.""" def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size num_channels = config.num_channels embed_dim = config.hidden_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size, padding=2) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: height, width = pixel_values.shape[-2:] if height != self.image_size[0] or width != self.image_size[1]: raise ValueError( f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." ) embeddings = self.projection(pixel_values) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings class VitPoseBackboneEmbeddings(nn.Module): """ Construct the position and patch embeddings. 
""" def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.patch_embeddings = VitPoseBackbonePatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: embeddings = self.patch_embeddings(pixel_values) # add positional encoding to each token embeddings = embeddings + self.position_embeddings[:, 1:] + self.position_embeddings[:, :1] embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.vit.modeling_vit.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): # Take the dot product between "query" and "key" to get the raw attention scores. attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling # Normalize the attention scores to probabilities. attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) # Mask heads if we want to if attention_mask is not None: attn_weights = attn_weights * attention_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->VitPoseBackbone class VitPoseBackboneSelfAttention(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." 
) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) def forward( self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: batch_size, seq_length, _ = hidden_states.shape key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VitPoseBackbone class VitPoseBackboneSelfOutput(nn.Module): """ The residual connection is defined in VitPoseBackboneLayer instead of here (as is the case with other models), due to the layernorm applied before each block. 
""" def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VitPoseBackbone class VitPoseBackboneAttention(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.attention = VitPoseBackboneSelfAttention(config) self.output = VitPoseBackboneSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads: set[int]) -> None: if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads ) # Prune linear layers self.attention.query = prune_linear_layer(self.attention.query, index) self.attention.key = prune_linear_layer(self.attention.key, index) self.attention.value = prune_linear_layer(self.attention.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: self_outputs = self.attention(hidden_states, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class VitPoseBackboneMoeMLP(nn.Module): def __init__(self, config: VitPoseBackboneConfig): super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) num_experts = config.num_experts part_features = config.part_features self.part_features = part_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, out_features - part_features) self.drop = nn.Dropout(config.hidden_dropout_prob) self.num_experts = num_experts experts = [nn.Linear(hidden_features, part_features) for _ in range(num_experts)] self.experts = nn.ModuleList(experts) def forward(self, hidden_state: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: expert_hidden_state = torch.zeros_like(hidden_state[:, :, -self.part_features :]) hidden_state = self.fc1(hidden_state) hidden_state = self.act(hidden_state) shared_hidden_state = self.fc2(hidden_state) indices = indices.view(-1, 1, 1) # to support ddp training for i in range(self.num_experts): selected_index = indices == i current_hidden_state = self.experts[i](hidden_state) * selected_index expert_hidden_state = expert_hidden_state + current_hidden_state hidden_state = torch.cat([shared_hidden_state, expert_hidden_state], dim=-1) return hidden_state class VitPoseBackboneMLP(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() in_features = out_features = config.hidden_size hidden_features = int(config.hidden_size * config.mlp_ratio) self.fc1 = 
nn.Linear(in_features, hidden_features, bias=True) self.activation = ACT2FN[config.hidden_act] self.fc2 = nn.Linear(hidden_features, out_features, bias=True) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = self.fc1(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.fc2(hidden_state) return hidden_state class VitPoseBackboneLayer(GradientCheckpointingLayer): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.num_experts = config.num_experts self.attention = VitPoseBackboneAttention(config) self.mlp = VitPoseBackboneMLP(config) if self.num_experts == 1 else VitPoseBackboneMoeMLP(config) self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: # Validate dataset_index when using multiple experts if self.num_experts > 1 and dataset_index is None: raise ValueError( "dataset_index must be provided when using multiple experts " f"(num_experts={self.num_experts}). Please provide dataset_index " "to the forward pass." ) self_attention_outputs = self.attention( self.layernorm_before(hidden_states), # in VitPoseBackbone, layernorm is applied before self-attention head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection hidden_states = attention_output + hidden_states layer_output = self.layernorm_after(hidden_states) if self.num_experts == 1: layer_output = self.mlp(layer_output) else: layer_output = self.mlp(layer_output, indices=dataset_index) # second residual connection layer_output = layer_output + hidden_states outputs = (layer_output,) + outputs return outputs # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VitPoseBackbone class VitPoseBackboneEncoder(nn.Module): def __init__(self, config: VitPoseBackboneConfig) -> None: super().__init__() self.config = config self.layer = nn.ModuleList([VitPoseBackboneLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Ignore copy def forward( self, hidden_states: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, dataset_index, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, 
attentions=all_self_attentions, ) @auto_docstring class VitPoseBackbonePreTrainedModel(PreTrainedModel): config: VitPoseBackboneConfig base_model_prefix = "vit" main_input_name = "pixel_values" supports_gradient_checkpointing = True _no_split_modules = ["VitPoseBackboneEmbeddings", "VitPoseBackboneLayer"] _supports_sdpa = True _supports_flash_attn = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm, VitPoseBackboneEmbeddings]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_( module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range ).to(module.weight.dtype) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, VitPoseBackboneEmbeddings): module.position_embeddings.data = nn.init.trunc_normal_( module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range, ).to(module.position_embeddings.dtype) @auto_docstring( custom_intro=""" The VitPose backbone useful for downstream tasks. """ ) class VitPoseBackbone(VitPoseBackbonePreTrainedModel, BackboneMixin): def __init__(self, config: VitPoseBackboneConfig): super().__init__(config) super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = VitPoseBackboneEmbeddings(config) self.encoder = VitPoseBackboneEncoder(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.Tensor, dataset_index: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" dataset_index (`torch.Tensor` of shape `(batch_size,)`): Index to use in the Mixture-of-Experts (MoE) blocks of the backbone. This corresponds to the dataset index used during training, e.g. index 0 refers to COCO. 
Examples: ```python >>> from transformers import VitPoseBackboneConfig, VitPoseBackbone >>> import torch >>> config = VitPoseBackboneConfig(out_indices=[-1]) >>> model = VitPoseBackbone(config) >>> pixel_values = torch.randn(1, 3, 256, 192) >>> dataset_index = torch.tensor([1]) >>> outputs = model(pixel_values, dataset_index) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings(pixel_values) outputs = self.encoder( embedding_output, dataset_index=dataset_index, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) hidden_states = outputs.hidden_states if return_dict else outputs[1] feature_maps = () for stage, hidden_state in zip(self.stage_names, hidden_states): if stage in self.out_features: hidden_state = self.layernorm(hidden_state) feature_maps += (hidden_state,) if not return_dict: if output_hidden_states: output = (feature_maps,) + outputs[1:] else: output = (feature_maps,) + outputs[2:] return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = ["VitPoseBackbonePreTrainedModel", "VitPoseBackbone"]
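`VitPoseBackboneMoeMLP` routes each sample to one expert with a dense mask instead of indexing: every expert runs on the full batch and `indices == i` zeroes out the samples it does not own, which keeps all experts in the autograd graph (the `# to support ddp training` comment in the code points at this design choice). A standalone sketch of that masked-sum routing, with hypothetical sizes:

```python
import torch
from torch import nn

batch, seq, hidden, part, num_experts = 2, 5, 8, 4, 3  # hypothetical sizes
experts = nn.ModuleList(nn.Linear(hidden, part) for _ in range(num_experts))

hidden_state = torch.randn(batch, seq, hidden)
indices = torch.tensor([0, 2]).view(-1, 1, 1)  # one expert id per sample

expert_out = torch.zeros(batch, seq, part)
for i, expert in enumerate(experts):
    # mask broadcasts over (seq, part); non-selected samples contribute zeros
    expert_out = expert_out + expert(hidden_state) * (indices == i)

# sample 0 matches expert 0 applied alone, sample 1 matches expert 2
assert torch.allclose(expert_out[0], experts[0](hidden_state[0]))
assert torch.allclose(expert_out[1], experts[2](hidden_state[1]))
```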
transformers/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py/0
{ "file_path": "transformers/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py", "repo_id": "transformers", "token_count": 9083 }
525
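The `eager_attention_forward` function in the backbone above is plain scaled dot-product attention: scores scaled by `head_dim**-0.5`, a softmax computed in float32 for numerical stability, then a weighted sum of the values. A minimal sketch under hypothetical shapes, checked against PyTorch's fused implementation:

```python
import torch
import torch.nn.functional as F

batch, heads, seq, head_dim = 1, 2, 4, 8  # hypothetical shapes
q, k, v = (torch.randn(batch, heads, seq, head_dim) for _ in range(3))

scaling = head_dim**-0.5
attn_weights = torch.matmul(q, k.transpose(-1, -2)) * scaling
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
attn_output = torch.matmul(attn_weights, v)

# matches PyTorch's fused implementation when there is no mask and no dropout
reference = F.scaled_dot_product_attention(q, k, v)
assert torch.allclose(attn_output, reference, atol=1e-5)
```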
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Video processor class for VJEPA2.""" from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ) from ...processing_utils import Unpack, VideosKwargs from ...utils import is_vision_available from ...utils.import_utils import requires from ...video_processing_utils import BaseVideoProcessor if is_vision_available(): from ...image_utils import PILImageResampling class VJEPA2VideoProcessorInitKwargs(VideosKwargs): ... @requires(backends=("torchvision",)) class VJEPA2VideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BILINEAR image_mean = IMAGENET_DEFAULT_MEAN image_std = IMAGENET_DEFAULT_STD size = {"shortest_edge": int(256 * 256 / 224)} crop_size = 256 do_resize = True do_rescale = True do_center_crop = True do_normalize = True valid_kwargs = VJEPA2VideoProcessorInitKwargs model_input_names = ["pixel_values_videos"] def __init__(self, **kwargs: Unpack[VJEPA2VideoProcessorInitKwargs]): crop_size = kwargs.get("crop_size", 256) if not isinstance(crop_size, int): if not isinstance(crop_size, dict) or "height" not in crop_size: raise ValueError("crop_size must be an integer or a dictionary with a 'height' key") crop_size = crop_size["height"] resize_size = int(crop_size * 256 / 224) kwargs["size"] = {"shortest_edge": resize_size} super().__init__(**kwargs) __all__ = ["VJEPA2VideoProcessor"]
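The resize target in `VJEPA2VideoProcessor.__init__` is derived from the crop size with the common 256/224 shortest-edge ratio, so the default crop of 256 resizes the shortest edge to `int(256 * 256 / 224) = 292` before center-cropping. A quick check of that arithmetic:

```python
def vjepa2_resize_size(crop_size: int) -> int:
    # shortest edge scaled by the 256/224 ratio used in the processor above
    return int(crop_size * 256 / 224)

assert vjepa2_resize_size(224) == 256
assert vjepa2_resize_size(256) == 292  # the processor's default
```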
transformers/src/transformers/models/vjepa2/video_processing_vjepa2.py/0
{ "file_path": "transformers/src/transformers/models/vjepa2/video_processing_vjepa2.py", "repo_id": "transformers", "token_count": 746 }
526
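The processor's `__init__` also normalizes `crop_size` before computing the resize target: it accepts either a plain integer or a size dictionary carrying a `"height"` key and rejects anything else. A small sketch of that validation, factored into a hypothetical standalone helper:

```python
def normalize_crop_size(crop_size) -> int:
    # mirrors the __init__ validation above: accept an int or a dict with "height"
    if isinstance(crop_size, int):
        return crop_size
    if isinstance(crop_size, dict) and "height" in crop_size:
        return crop_size["height"]
    raise ValueError("crop_size must be an integer or a dictionary with a 'height' key")

assert normalize_crop_size(288) == 288
assert normalize_crop_size({"height": 288, "width": 288}) == 288
```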
# coding=utf-8 # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for Wav2Vec2.""" import json import os import warnings from dataclasses import dataclass from itertools import groupby from typing import TYPE_CHECKING, Optional, Union import numpy as np from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken, BatchEncoding from ...utils import ( ModelOutput, PaddingStrategy, TensorType, add_end_docstrings, is_flax_available, is_tf_available, is_torch_available, logging, to_py_obj, ) logger = logging.get_logger(__name__) if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp # noqa: F401 VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", } # Wav2Vec2 has no max input length WAV2VEC2_KWARGS_DOCSTRING = r""" padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. """ ListOfDict = list[dict[str, Union[int, str]]] @dataclass class Wav2Vec2CTCTokenizerOutput(ModelOutput): """ Output type of [` Wav2Vec2CTCTokenizer`], with transcription. Args: text (list of `str` or `str`): Decoded logits in text from. Usually the speech transcription. char_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`): Offsets of the decoded characters. 
In combination with sampling rate and model downsampling rate char offsets can be used to compute
            time stamps for each character.
        word_offsets (list of `list[dict[str, Union[int, str]]]` or `list[dict[str, Union[int, str]]]`):
            Offsets of the decoded words. In combination with sampling rate and model downsampling rate word offsets
            can be used to compute time stamps for each word.
    """

    text: Union[list[str], str]
    char_offsets: Union[list[ListOfDict], ListOfDict] = None
    word_offsets: Union[list[ListOfDict], ListOfDict] = None


class Wav2Vec2CTCTokenizer(PreTrainedTokenizer):
    """
    Constructs a Wav2Vec2CTC tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
    the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sentence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sentence token.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        word_delimiter_token (`str`, *optional*, defaults to `"|"`):
            The token used for defining the end of a word.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to accept lowercase input and lowercase the output when decoding.
        target_lang (`str`, *optional*):
            A target language the tokenizer should set by default. `target_lang` has to be defined for multi-lingual,
            nested vocabulary such as [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all).

        **kwargs
            Additional keyword arguments passed along to [`PreTrainedTokenizer`]
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        word_delimiter_token="|",
        replace_word_delimiter_char=" ",
        do_lower_case=False,
        target_lang=None,
        **kwargs,
    ):
        self._word_delimiter_token = word_delimiter_token

        self.do_lower_case = do_lower_case
        self.replace_word_delimiter_char = replace_word_delimiter_char
        self.target_lang = target_lang

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)

        # if target lang is defined vocab must be a nested dict
        # with each target lang being one vocabulary
        if target_lang is not None:
            self.encoder = self.vocab[target_lang]
        else:
            self.encoder = self.vocab

        self.decoder = {v: k for k, v in self.encoder.items()}

        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            word_delimiter_token=word_delimiter_token,
            replace_word_delimiter_char=replace_word_delimiter_char,
            target_lang=target_lang,
            **kwargs,
        )

        # make sure that tokens made of several
        # characters are not split at tokenization
        for token in self.encoder:
            if len(token) > 1:
                self.add_tokens(AddedToken(token, rstrip=True, lstrip=True, normalized=False))

    def set_target_lang(self, target_lang: str):
        """
        Set the target language of a nested multi-lingual dictionary
        """
        if self.vocab == self.encoder:
            raise ValueError(f"{self.vocab} is not a multi-lingual, nested tokenizer. 
Cannot set target language.") if target_lang not in self.vocab: raise ValueError(f"{target_lang} does not exist. Choose one of {', '.join(self.vocab.keys())}.") self.target_lang = target_lang self.init_kwargs["target_lang"] = target_lang self.encoder = self.vocab[target_lang] self.decoder = {v: k for k, v in self.encoder.items()} # make sure that tokens made of several # characters are not split at tokenization for token in self.encoder: if len(token) > 1: self.add_tokens(AddedToken(token, rstrip=True, lstrip=True, normalized=False)) @property def word_delimiter_token(self) -> str: """ `str`: Word delimiter token. Log an error if used while not having been set. """ if self._word_delimiter_token is None and self.verbose: logger.error("Using word_delimiter_token, but it is not set yet.") return None return str(self._word_delimiter_token) @property def word_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been set. """ if self._word_delimiter_token is None: return None return self.convert_tokens_to_ids(self.word_delimiter_token) @word_delimiter_token.setter def word_delimiter_token(self, value): self._word_delimiter_token = value @word_delimiter_token_id.setter def word_delimiter_token_id(self, value): self._word_delimiter_token = self.convert_tokens_to_ids(value) @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> dict: vocab = dict(self.encoder) vocab.update(self.added_tokens_encoder) return vocab def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int: # Overwritten to never strip! to_add = [] for token in new_tokens: if isinstance(token, str): to_add.append(AddedToken(token, rstrip=False, lstrip=False, normalized=False)) else: to_add.append(token) return super()._add_tokens(to_add, special_tokens) def _tokenize(self, text, **kwargs): """ Converts a string into a sequence of tokens (string), using the tokenizer. """ if self.do_lower_case: text = text.upper() return list(text.replace(" ", self.word_delimiter_token)) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string( self, tokens: list[str], group_tokens: bool = True, spaces_between_special_tokens: bool = False, output_char_offsets: bool = False, output_word_offsets: bool = False, ) -> dict[str, Union[str, float]]: """ Converts a connectionist-temporal-classification (CTC) output tokens into a single string. 
""" if len(tokens) == 0: return {"text": "", "char_offsets": [], "word_offsets": []} # group same tokens into non-repeating tokens in CTC style decoding if group_tokens: chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens))) else: chars = tokens char_repetitions = len(tokens) * [1] # filter self.pad_token which is used as CTC-blank token processed_chars = list(filter(lambda char: char != self.pad_token, chars)) # replace delimiter token processed_chars = [ self.replace_word_delimiter_char if char == self.word_delimiter_token else char for char in processed_chars ] # retrieve offsets char_offsets = word_offsets = None if output_char_offsets or output_word_offsets: char_offsets = self._compute_offsets(char_repetitions, chars, self.pad_token) if len(char_offsets) != len(processed_chars): raise ValueError( f"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}" " have to be of the same length, but are: " f"`len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`:" f" {len(processed_chars)}" ) # set tokens to correct processed token for i, char in enumerate(processed_chars): char_offsets[i]["char"] = char # retrieve word offsets from character offsets word_offsets = None if output_word_offsets: word_offsets = self._get_word_offsets(char_offsets, self.replace_word_delimiter_char) # don't output chars if not set to True if not output_char_offsets: char_offsets = None # join to string join_char = " " if spaces_between_special_tokens else "" string = join_char.join(processed_chars).strip() if self.do_lower_case: string = string.lower() return {"text": string, "char_offsets": char_offsets, "word_offsets": word_offsets} @staticmethod def _compute_offsets( char_repetitions: list[int], chars: list[str], ctc_token: int ) -> list[dict[str, Union[str, int]]]: end_indices = np.asarray(char_repetitions).cumsum() start_indices = np.concatenate(([0], end_indices[:-1])) offsets = [ {"char": t, "start_offset": s, "end_offset": e} for t, s, e in zip(chars, start_indices, end_indices) ] # filter out CTC token offsets = list(filter(lambda offsets: offsets["char"] != ctc_token, offsets)) return offsets @staticmethod def _get_word_offsets( offsets: dict[str, Union[str, float]], word_delimiter_char: str = " " ) -> dict[str, Union[str, float]]: word_offsets = [] last_state = "SPACE" word = "" start_offset = 0 end_offset = 0 for i, offset in enumerate(offsets): char = offset["char"] state = "SPACE" if char == word_delimiter_char else "WORD" if state == last_state: # If we are in the same state as before, we simply repeat what we've done before end_offset = offset["end_offset"] word += char else: # Switching state if state == "SPACE": # Finishing a word word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) else: # Starting a new word start_offset = offset["start_offset"] end_offset = offset["end_offset"] word = char last_state = state if last_state == "WORD": word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset}) return word_offsets def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): if is_split_into_words: text = " " + text return (text, kwargs) def _decode( self, token_ids: list[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, group_tokens: bool = True, spaces_between_special_tokens: bool = False, output_word_offsets: Optional[bool] = False, output_char_offsets: Optional[bool] = False, ) -> str: 
""" special _decode function is needed for Wav2Vec2Tokenizer because added tokens should be treated exactly the same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on the whole token list and not individually on added tokens """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) result = [] for token in filtered_tokens: if skip_special_tokens and ( token in self.all_special_ids or (token != self.pad_token and token in self.all_special_tokens) ): continue result.append(token) string_output = self.convert_tokens_to_string( result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, output_word_offsets=output_word_offsets, output_char_offsets=output_char_offsets, ) text = string_output["text"] clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: text = self.clean_up_tokenization(text) if output_word_offsets or output_char_offsets: return Wav2Vec2CTCTokenizerOutput( text=text, char_offsets=string_output["char_offsets"], word_offsets=string_output["word_offsets"], ) else: return text # overwritten from `tokenization_utils_base.py` because tokenizer can output # `ModelOutput` which should not be a list for batched output and # because we need docs for `output_char_offsets` here def batch_decode( self, sequences: Union[list[int], list[list[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, output_char_offsets: bool = False, output_word_offsets: bool = False, **kwargs, ) -> list[str]: """ Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make use of `output_char_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched output. </Tip> output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. <Tip> Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make use of `output_word_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched output. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `list[str]` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded sentences. 
Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when `output_char_offsets == True` or `output_word_offsets == True`. """ batch_decoded = [ self.decode( seq, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, output_word_offsets=output_word_offsets, **kwargs, ) for seq in sequences ] if output_char_offsets or output_word_offsets: # transform list of dicts to dict of lists return Wav2Vec2CTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]}) return batch_decoded # overwritten from `tokenization_utils_base.py` because we need docs for `output_char_offsets` # and `output_word_offsets` here def decode( self, token_ids: Union[int, list[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, output_char_offsets: bool = False, output_word_offsets: bool = False, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. output_char_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output character offsets. Character offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed characters. <Tip> Please take a look at the example below to better understand how to make use of `output_char_offsets`. </Tip> output_word_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate and model downsampling rate to compute the time-stamps of transcribed words. <Tip> Please take a look at the example below to better understand how to make use of `output_word_offsets`. </Tip> kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded sentences. Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when `output_char_offsets == True` or `output_word_offsets == True`. 
Example: ```python >>> # Let's see how to retrieve time steps for a model >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC >>> from datasets import load_dataset >>> import datasets >>> import torch >>> # import model, feature extractor, tokenizer >>> model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-base-960h") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") >>> # load first sample of English common_voice >>> dataset = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="train", streaming=True) >>> dataset = dataset.cast_column("audio", datasets.Audio(sampling_rate=16_000)) >>> dataset_iter = iter(dataset) >>> sample = next(dataset_iter) >>> # forward sample through model to get greedily predicted transcription ids >>> input_values = feature_extractor(sample["audio"]["array"], return_tensors="pt").input_values >>> logits = model(input_values).logits[0] >>> pred_ids = torch.argmax(logits, axis=-1) >>> # retrieve word stamps (analogous commands for `output_char_offsets`) >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True) >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate >>> word_offsets = [ ... { ... "word": d["word"], ... "start_time": round(d["start_offset"] * time_offset, 2), ... "end_time": round(d["end_offset"] * time_offset, 2), ... } ... for d in outputs.word_offsets ... ] >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer: >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en >>> word_offsets[:3] [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}] ```""" # Convert inputs to python lists token_ids = to_py_obj(token_ids) return self._decode( token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, output_char_offsets=output_char_offsets, output_word_offsets=output_word_offsets, **kwargs, ) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) class Wav2Vec2Tokenizer(PreTrainedTokenizer): """ Constructs a Wav2Vec2 tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): File containing the vocabulary. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. word_delimiter_token (`str`, *optional*, defaults to `"|"`): The token used for defining the end of a word. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the output when decoding. do_normalize (`bool`, *optional*, defaults to `False`): Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly improve the performance for some models, *e.g.*, [wav2vec2-lv60](https://huggingface.co/models?search=lv60). return_attention_mask (`bool`, *optional*, defaults to `False`): Whether or not [`~Wav2Vec2Tokenizer.__call__`] should return `attention_mask`. <Tip> Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using `attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask` should be passed. For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as [wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be passed for batched inference. </Tip> **kwargs Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = { "vocab_file": { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json" }, "tokenizer_config_file": { "facebook/wav2vec2-base-960h": ( "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/tokenizer.json" ), }, } model_input_names = ["input_values", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", word_delimiter_token="|", do_lower_case=False, do_normalize=False, return_attention_mask=False, **kwargs, ): warnings.warn( "The class `Wav2Vec2Tokenizer` is deprecated and will be removed in version 5 of Transformers. Please use" " `Wav2Vec2Processor` or `Wav2Vec2CTCTokenizer` instead.", FutureWarning, ) self._word_delimiter_token = word_delimiter_token self.do_lower_case = do_lower_case self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, do_normalize=do_normalize, return_attention_mask=return_attention_mask, word_delimiter_token=word_delimiter_token, **kwargs, ) @property def word_delimiter_token(self) -> str: """ `str`: Padding token. Log an error if used while not having been set. """ if self._word_delimiter_token is None and self.verbose: logger.error("Using word_delimiter_token, but it is not set yet.") return None return str(self._word_delimiter_token) @property def word_delimiter_token_id(self) -> Optional[int]: """ `Optional[int]`: Id of the word_delimiter_token in the vocabulary. Returns `None` if the token has not been set. 
""" if self._word_delimiter_token is None: return None return self.convert_tokens_to_ids(self.word_delimiter_token) @word_delimiter_token.setter def word_delimiter_token(self, value): self._word_delimiter_token = value @word_delimiter_token_id.setter def word_delimiter_token_id(self, value): self._word_delimiter_token = self.convert_tokens_to_ids(value) @add_end_docstrings(WAV2VEC2_KWARGS_DOCSTRING) def __call__( self, raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, padding_side: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences. Args: raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`): The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float values, a list of numpy array or a list of list of float values. Must be mono channel audio, not stereo, i.e. single float per timestep. padding_side (`str`, *optional*): The side on which the model should have padding applied. Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. """ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") is_batched = is_batched_numpy or ( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list))) ) # make sure input is in list format if is_batched and not isinstance(raw_speech[0], np.ndarray): raw_speech = [np.asarray(speech) for speech in raw_speech] elif not is_batched and not isinstance(raw_speech, np.ndarray): raw_speech = np.asarray(raw_speech) # always return batch if not is_batched: raw_speech = [raw_speech] # zero-mean and unit-variance normalization if self.do_normalize: raw_speech = [(x - np.mean(x)) / np.sqrt(np.var(x) + 1e-5) for x in raw_speech] # convert into correct format for padding encoded_inputs = BatchEncoding({"input_values": raw_speech}) padded_inputs = self.pad( encoded_inputs, padding=padding, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=self.return_attention_mask, return_tensors=return_tensors, verbose=verbose, ) return padded_inputs @property def vocab_size(self) -> int: return len(self.decoder) def get_vocab(self) -> dict: return dict(self.encoder, **self.added_tokens_encoder) def _convert_token_to_id(self, token: str) -> int: """Converts a token (str) in an index (integer) using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" result = self.decoder.get(index, self.unk_token) return result def convert_tokens_to_string(self, tokens: list[str]) -> str: """ Converts a connectionist-temporal-classification (CTC) output tokens into a single string. 
""" # group same tokens into non-repeating tokens in CTC style decoding grouped_tokens = [token_group[0] for token_group in groupby(tokens)] # filter self.pad_token which is used as CTC-blank token filtered_tokens = list(filter(lambda token: token != self.pad_token, grouped_tokens)) # replace delimiter token string = "".join([" " if token == self.word_delimiter_token else token for token in filtered_tokens]).strip() if self.do_lower_case: string = string.lower() return string def _decode( self, token_ids: list[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, **kwargs, ) -> str: """ special _decode function is needed for Wav2Vec2Tokenizer because added tokens should be treated exactly the same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on the whole token list and not individually on added tokens """ filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) result = [] for token in filtered_tokens: if skip_special_tokens and ( token in self.all_special_ids or (token != self.pad_token and token in self.all_special_tokens) ): continue result.append(token) text = self.convert_tokens_to_string(result) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) __all__ = ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"]
transformers/src/transformers/models/wav2vec2/tokenization_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/tokenization_wav2vec2.py", "repo_id": "transformers", "token_count": 16841 }
527
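`Wav2Vec2CTCTokenizer._compute_offsets` above derives character timings from the run lengths with a cumulative sum: end indices are the cumsum of the repetition counts, start indices are the same array shifted right by one, and blank entries are filtered out afterwards. A sketch of that bookkeeping on hypothetical run lengths:

```python
import numpy as np

chars = ["H", "E", "<pad>", "L", "O"]
char_repetitions = [2, 1, 1, 3, 1]  # hypothetical run lengths from groupby

end = np.asarray(char_repetitions).cumsum()
start = np.concatenate(([0], end[:-1]))
offsets = [
    {"char": c, "start_offset": int(s), "end_offset": int(e)}
    for c, s, e in zip(chars, start, end)
    if c != "<pad>"  # drop the CTC blank, as the tokenizer does
]

# "L" occupied frames 4..7 once the blank's frame is accounted for
assert offsets[2] == {"char": "L", "start_offset": 4, "end_offset": 7}
```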
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for Whisper.""" import json import os import warnings from functools import lru_cache from typing import Optional, Union import numpy as np import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging from .english_normalizer import BasicTextNormalizer, EnglishTextNormalizer VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "tokenizer_file": "tokenizer.json", "merges_file": "merges.txt", "normalizer_file": "normalizer.json", } MAX_MODEL_INPUT_SIZES = { "openai/whisper-base": 448, } # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) logger = logging.get_logger(__name__) # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). 
""" pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs LANGUAGES = { "en": "english", "zh": "chinese", "de": "german", "es": "spanish", "ru": "russian", "ko": "korean", "fr": "french", "ja": "japanese", "pt": "portuguese", "tr": "turkish", "pl": "polish", "ca": "catalan", "nl": "dutch", "ar": "arabic", "sv": "swedish", "it": "italian", "id": "indonesian", "hi": "hindi", "fi": "finnish", "vi": "vietnamese", "he": "hebrew", "uk": "ukrainian", "el": "greek", "ms": "malay", "cs": "czech", "ro": "romanian", "da": "danish", "hu": "hungarian", "ta": "tamil", "no": "norwegian", "th": "thai", "ur": "urdu", "hr": "croatian", "bg": "bulgarian", "lt": "lithuanian", "la": "latin", "mi": "maori", "ml": "malayalam", "cy": "welsh", "sk": "slovak", "te": "telugu", "fa": "persian", "lv": "latvian", "bn": "bengali", "sr": "serbian", "az": "azerbaijani", "sl": "slovenian", "kn": "kannada", "et": "estonian", "mk": "macedonian", "br": "breton", "eu": "basque", "is": "icelandic", "hy": "armenian", "ne": "nepali", "mn": "mongolian", "bs": "bosnian", "kk": "kazakh", "sq": "albanian", "sw": "swahili", "gl": "galician", "mr": "marathi", "pa": "punjabi", "si": "sinhala", "km": "khmer", "sn": "shona", "yo": "yoruba", "so": "somali", "af": "afrikaans", "oc": "occitan", "ka": "georgian", "be": "belarusian", "tg": "tajik", "sd": "sindhi", "gu": "gujarati", "am": "amharic", "yi": "yiddish", "lo": "lao", "uz": "uzbek", "fo": "faroese", "ht": "haitian creole", "ps": "pashto", "tk": "turkmen", "nn": "nynorsk", "mt": "maltese", "sa": "sanskrit", "lb": "luxembourgish", "my": "myanmar", "bo": "tibetan", "tl": "tagalog", "mg": "malagasy", "as": "assamese", "tt": "tatar", "haw": "hawaiian", "ln": "lingala", "ha": "hausa", "ba": "bashkir", "jw": "javanese", "su": "sundanese", "yue": "cantonese", } # language code lookup by name, with a few language aliases TO_LANGUAGE_CODE = { **{language: code for code, language in LANGUAGES.items()}, "burmese": "my", "valencian": "ca", "flemish": "nl", "haitian": "ht", "letzeburgesch": "lb", "pushto": "ps", "panjabi": "pa", "moldavian": "ro", "moldovan": "ro", "sinhalese": "si", "castilian": "es", "mandarin": "zh", } TASK_IDS = ["translate", "transcribe"] class WhisperTokenizer(PreTrainedTokenizer): """ Construct a Whisper tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to the superclass for more information regarding such methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. normalizer_file (`str`, *optional*): Path to the normalizer_file file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The beginning of sequence token. The `decoder_start_token_id` is used to set the first token as `"<|startoftranscript|>"` when generating. eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): The end of sequence token. pad_token (`str`, *optional*): The token used for padding, for example when batching sequences of different lengths. 
add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. language (`str`, *optional*): The language of the transcription text. The corresponding language id token is appended to the start of the sequence for multilingual speech recognition and speech translation tasks, e.g. for Spanish the token `"<|es|>"` is appended to the start of sequence. This should be used for multilingual fine-tuning only. task (`str`, *optional*): Task identifier to append at the start of sequence (if any). This should be used for mulitlingual fine-tuning, with `"transcribe"` for speech recognition and `"translate"` for speech translation. predict_timestamps (`bool`, *optional*, defaults to `False`): Whether to omit the `<|notimestamps|>` token at the start of the sequence. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, normalizer_file=None, errors="replace", unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", pad_token=None, add_prefix_space=False, language=None, task=None, predict_timestamps=False, **kwargs, ): bos_token = ( AddedToken(bos_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(bos_token, str) else bos_token ) eos_token = ( AddedToken(eos_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(eos_token, str) else eos_token ) unk_token = ( AddedToken(unk_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(unk_token, str) else unk_token ) pad_token = ( AddedToken(pad_token, lstrip=False, rstrip=False, normalized=False, special=True) if isinstance(pad_token, str) else pad_token ) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space if normalizer_file is not None: with open(normalizer_file, encoding="utf-8") as vocab_handle: self.english_spelling_normalizer = json.load(vocab_handle) else: self.english_spelling_normalizer = None # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") self.timestamp_pat = re.compile(r"<\|(\d+\.\d+)\|>") self.language = language super().__init__( errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, **kwargs, ) self.task = task self.predict_timestamps = predict_timestamps @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe with GPT2 -> Whisper def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if 
not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def set_prefix_tokens( self, language: Optional[str] = None, task: Optional[str] = None, predict_timestamps: Optional[bool] = None ): """ Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to update the prefix tokens as required when fine-tuning. Example: ```python >>> # instantiate the tokenizer and set the prefix token to Spanish >>> tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish") >>> # now switch the prefix token from Spanish to French >>> tokenizer.set_prefix_tokens(language="french") ``` Args: language (`str`, *optional*, defaults to `None`): The language of the transcription text. task (`str`, *optional*, defaults to `None`): Task identifier to append at the start of sequence (if any). predict_timestamps (`bool`, *optional*, defaults to `None`): Whether to omit the `<|notimestamps|>` token at the start of the sequence. """ self.language = language if language is not None else self.language self.task = task if task is not None else self.task self.predict_timestamps = predict_timestamps if predict_timestamps is not None else self.predict_timestamps @property def prefix_tokens(self) -> list[int]: bos_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") translate_token_id = self.convert_tokens_to_ids("<|translate|>") transcribe_token_id = self.convert_tokens_to_ids("<|transcribe|>") notimestamps_token_id = self.convert_tokens_to_ids("<|notimestamps|>") langs = tuple(LANGUAGES.keys()) if self.language is not None: self.language = self.language.lower() if self.language in TO_LANGUAGE_CODE: language_id = TO_LANGUAGE_CODE[self.language] elif self.language in TO_LANGUAGE_CODE.values(): language_id = self.language else: is_language_code = len(self.language) == 2 raise ValueError( f"Unsupported language: {self.language}. Language should be one of:" f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}." ) if self.task is not None: if self.task not in TASK_IDS: raise ValueError(f"Unsupported task: {self.task}. 
Task should be in: {TASK_IDS}") bos_sequence = [bos_token_id] if self.language is not None: bos_sequence.append(bos_token_id + 1 + langs.index(language_id)) if self.task is not None: bos_sequence.append(transcribe_token_id if self.task == "transcribe" else translate_token_id) if not self.predict_timestamps: bos_sequence.append(notimestamps_token_id) return bos_sequence # Copied from transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer.build_inputs_with_special_tokens def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]: """Build model inputs from a sequence by appending eos_token_id.""" if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id] # Copied from transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize with GPT2 -> Whisper def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id with GPT2 -> Whisper def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """ Converts an index (integer) in a token (str) using the vocab. Whisper's base tokenizer always decodes OOV tokens as "", thus we do not use the `unk_token` here. """ return self.decoder.get(index, "") def _normalize(self, text): warnings.warn( "The private method `_normalize` is deprecated and will be removed in v5 of Transformers." "You can normalize an input string using the Whisper English normalizer using the `normalize` method." 
) return self.normalize(text) def _basic_normalize(self, text, remove_diacritics=False): warnings.warn( "The private method `_basic_normalize` is deprecated and will be removed in v5 of Transformers." "You can normalize an input string using the Whisper basic normalizer using the `basic_normalize` method." ) return self.basic_normalize(text, remove_diacritics=remove_diacritics) def normalize(self, text): """ Normalize a given string using the `EnglishTextNormalizer` class, which performs commons transformation on english text. """ normalizer = EnglishTextNormalizer(self.english_spelling_normalizer) return normalizer(text) @staticmethod def basic_normalize(text, remove_diacritics=False): """ Normalize a given string using the `BasicTextNormalizer` class, which performs commons transformation on multilingual text. """ normalizer = BasicTextNormalizer(remove_diacritics=remove_diacritics) return normalizer(text) def _decode_with_timestamps( self, token_ids, skip_special_tokens=False, time_precision=0.02, segment_size=1500 ) -> str: """ Timestamp tokens are above the special tokens' id range and are ignored by `decode()`. This method decodes given tokens with timestamps tokens annotated, e.g. "<|1.08|>". """ timestamp_begin = self.all_special_ids[-1] + 1 outputs = [[]] cur_max_timestamp = 0.0 prev_segments_len = 0.0 penultimate_timestamp = 0.0 for i, token in enumerate(token_ids): if token >= timestamp_begin: timestamp = float((token - timestamp_begin) * time_precision) if timestamp < cur_max_timestamp: # next segment has started last_was_single_ending = i >= 2 and not ( token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin ) if last_was_single_ending: prev_segments_len += time_precision * segment_size else: cur_max_timestamp = penultimate_timestamp prev_segments_len += penultimate_timestamp outputs = outputs[:-2] penultimate_timestamp = cur_max_timestamp cur_max_timestamp = timestamp outputs.append(f"<|{(timestamp + prev_segments_len):.2f}|>") outputs.append([]) else: outputs[-1].append(token) outputs = [ s if isinstance(s, str) else self.decode(s, skip_special_tokens=skip_special_tokens) for s in outputs ] return "".join(outputs) def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500): """ Compute offsets for a given tokenized input Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. time_precision (`float`, *optional*, defaults to 0.02): The time ratio to convert from token to time. segment_size (`int`, *optional*, defaults to 1500): The number of features in the input mel spectrogram. 
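        Returns:
            `list[dict]`: One dictionary per timestamped segment, each holding the decoded segment `"text"`
            and a `"timestamp"` tuple of `(start, end)` times in seconds.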
""" offsets = [] # ensure torch tensor of token ids is placed on cpu if "torch" in str(type(token_ids)) and (hasattr(token_ids, "cpu") and callable(token_ids.cpu)): token_ids = token_ids.cpu() token_ids = np.array(token_ids) if token_ids.shape[0] > 1 and len(token_ids.shape) > 1: raise ValueError("Can only process a single input at a time") timestamp_begin = self.all_special_ids[-1] + 1 timestamp_tokens = token_ids >= timestamp_begin consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1 if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1: # either there are no timestamps or there are no consecutive ones return [] elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive: # we add the final timestamp if it is not already in the list consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1) last_slice = np.where(timestamp_tokens)[0][0] cur_max_timestamp = 0 prev_segments_len = 0 for current_slice in consecutive: sliced_tokens = token_ids[last_slice:current_slice] if len(sliced_tokens) > 1: start_timestamp_position = sliced_tokens[0].item() - timestamp_begin end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin if start_timestamp_position < cur_max_timestamp: # next segment has started is_single_ending = last_slice >= 2 and not ( token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin ) if is_single_ending: prev_segments_len += segment_size else: prev_segments_len += cur_max_timestamp cur_max_timestamp = end_timestamp_position # strip timestamp tokens from the text output sliced_tokens = self._preprocess_token_ids(sliced_tokens) text = self._decode(sliced_tokens) text = self._filter_timestamp_ids(text) offsets.append( { "text": text, "timestamp": ( start_timestamp_position * time_precision + prev_segments_len * time_precision, end_timestamp_position * time_precision + prev_segments_len * time_precision, ), } ) last_slice = current_slice return offsets @lru_cache def timestamp_ids(self, time_precision=0.02): """ Compute the timestamp token ids for a given precision and save to least-recently used (LRU) cache. Args: time_precision (`float`, *optional*, defaults to 0.02): The time ratio to convert from token to time. """ return self.convert_tokens_to_ids([("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)]) def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool = False): """ Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids. Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be removed. 
""" if skip_special_tokens: prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>") decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>") token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id) return token_ids def _filter_timestamp_ids(self, token_ids): return re.sub(self.timestamp_pat, "", token_ids) def decode( self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: Optional[bool] = None, output_offsets: bool = False, time_precision: float = 0.02, decode_with_timestamps: bool = False, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs, ) -> str: """ Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special tokens and clean up tokenization spaces. Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. Args: token_ids (`Union[int, list[int], np.ndarray, torch.Tensor, tf.Tensor]`): List of tokenized input ids. Can be obtained using the `__call__` method. skip_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to remove special tokens in the decoding. Will remove the previous tokens (pre-prompt) if present. clean_up_tokenization_spaces (`bool`, *optional*): Whether or not to clean up the tokenization spaces. If `None`, will default to `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). output_offsets (`bool`, *optional*, defaults to `False`): Whether or not to output the offsets of the tokens. This should only be set if the model predicted timestamps. If there are previous tokens (pre-prompt) to decode, they will only appear in the decoded text if they contain timestamp tokens. time_precision (`float`, *optional*, defaults to 0.02): The time ratio to convert from token to time. decode_with_timestamps (`bool`, *optional*, defaults to `False`): Whether or not to decode with timestamps included in the raw text. normalize (`bool`, *optional*, defaults to `False`): Whether or not to apply the English text normalizer to the decoded text. Only applicable when the target text is in English. Otherwise, the basic text normalizer should be applied. basic_normalize (`bool`, *optional*, defaults to `False`): Whether or not to apply the Basic text normalizer to the decoded text. Applicable to multilingual target text. remove_diacritics (`bool`, *optional*, defaults to `False`): Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may destroy information in the decoded text, hence it should be used with caution. kwargs (additional keyword arguments, *optional*): Will be passed to the underlying model specific decode method. Returns: `str`: The decoded sentence. 
""" filtered_ids = self._preprocess_token_ids( token_ids, skip_special_tokens=skip_special_tokens, ) text = super().decode( filtered_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, normalize=normalize, basic_normalize=basic_normalize, remove_diacritics=remove_diacritics, **kwargs, ) if decode_with_timestamps: # legacy method to decode timestamps when not included in the tokenizer vocabulary text = self._decode_with_timestamps( filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens ) else: text = self._filter_timestamp_ids(text) # retrieve offsets if output_offsets: offsets = self._compute_offsets(token_ids, time_precision=time_precision) return {"text": text, "offsets": offsets} return text def _decode( self, token_ids: Union[int, list[int]], skip_special_tokens: bool = False, normalize: bool = False, basic_normalize: bool = False, remove_diacritics: bool = False, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) text = "".join(sub_texts) if normalize: clean_text = self.normalize(text) return clean_text elif basic_normalize: clean_text = self.basic_normalize(text, remove_diacritics=remove_diacritics) return clean_text else: return text # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string with GPT2 -> Whisper def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) normalizer_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["normalizer_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" 
) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 if self.english_spelling_normalizer is not None: with open(normalizer_file, "w", encoding="utf-8") as f: f.write( json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n" ) return vocab_file, merge_file, normalizer_file # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.prepare_for_tokenization with GPT2 -> Whisper def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if is_split_into_words or add_prefix_space: text = " " + text return (text, kwargs) def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): self.set_prefix_tokens(task=task, language=language, predict_timestamps=not no_timestamps) # prefix tokens are of the form: <|startoftranscript|> <|lang_id|> <|task|> <|notimestamps|> # we don't want to force the bos token at position 1, as this is the starting token # when we generate, so we slice the prefix tokens to: <|lang_id|> <|task|> <|notimestamps|> # to get the forced tokens forced_tokens = self.prefix_tokens[1:] forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_tokens)] return forced_decoder_ids def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision): return _decode_asr( self, model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision, ) def get_prompt_ids(self, text: str, return_tensors="np"): """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`].""" batch_encoding = self("<|startofprev|>", " " + text.strip(), add_special_tokens=False) # Check for special tokens prompt_text_ids = batch_encoding["input_ids"][1:] special_token_id = next((x for x in prompt_text_ids if x >= self.all_special_ids[0]), None) if special_token_id is not None: token = self.convert_ids_to_tokens(special_token_id) raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.") batch_encoding.convert_to_tensors(tensor_type=return_tensors) return batch_encoding["input_ids"] def _strip_prompt(self, token_ids: list[int], prompt_token_id: int, decoder_start_token_id: int): if not isinstance(token_ids, list): token_ids = self._convert_to_list(token_ids) # handle case of empty token_ids for decoding with timestamps. # at this point token_ids is a list, so it is safe to use if not check. if not token_ids: return token_ids has_prompt = token_ids[0] == prompt_token_id if has_prompt: if decoder_start_token_id in token_ids: return token_ids[token_ids.index(decoder_start_token_id) :] else: return [] return token_ids @staticmethod def _convert_to_list(token_ids): # convert type to ndarray if necessary if hasattr(token_ids, "numpy"): if "torch" in str(type(token_ids)): token_ids = token_ids.cpu().numpy() elif "tensorflow" in str(type(token_ids)): token_ids = token_ids.numpy() elif "jaxlib" in str(type(token_ids)): token_ids = token_ids.tolist() # now the token ids are either a numpy array, or a list of lists if isinstance(token_ids, np.ndarray): token_ids = token_ids.tolist() return token_ids def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, time_precision, segment_size=1500): """ Internal method meant to only be used by asr pipeline. 
Handles all the little quirks specific to whisper to handle the various options not allowed in other seq2seq models """ # =========== Overview ============ # - iterate over all outputs # - all tokens within output # - Each token can be # - language token # - special token # - timestamp token # - text token # - We accumulate the text tokens. # - We split on end timestamps # - Lots of complexity comes from stride and timestamps last_language = None def new_chunk(): return {"language": last_language, "timestamp": [None, None], "text": ""} # Welcome to the state machine ! chunks = [] chunk = new_chunk() time_offset = 0.0 timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1 previous_tokens = [] previous_token_timestamps = [] skip = False right_stride_start = None all_special_ids = set(tokenizer.all_special_ids) prompt_token_id = tokenizer.convert_tokens_to_ids("<|startofprev|>") decoder_start_token_id = tokenizer.convert_tokens_to_ids("<|startoftranscript|>") # - iterate over all outputs for chunk_id, output in enumerate(model_outputs): # We can drop everything to Python list, it's going to make # our lives easier token_ids = output["tokens"][0].tolist() # (possibly) remove the prompt from the token ids token_ids = tokenizer._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id) if return_timestamps == "word": token_timestamps = output["token_timestamps"][0].tolist() # Those keep track of timestamps within strides # Which need to be skipped and resolve all tokens in a single # chunk. last_timestamp = None first_timestamp = timestamp_begin # long form generation: we need to handle the case where the call to generate returns concatenated segments, # with underlying multiple calls to generate cur_max_timestamp = 0.0 prev_segments_len = 0.0 penultimate_timestamp = 0.0 if "stride" in output: chunk_len, stride_left, stride_right = output["stride"] # Offset the timings to account for the other `model_outputs`. time_offset -= stride_left right_stride_start = chunk_len - stride_right # Keeping track of timestamps within strides # We're going to NOT split on those, and delay until we're # out of BOTH stride. 
Otherwise lots of issues occur and # corner cases if stride_left: first_timestamp = stride_left / time_precision + timestamp_begin if stride_right: for token in reversed(token_ids): if token >= timestamp_begin: # There can be several token in the right stride # But the last one is ALWAYS going to be skipped if ( last_timestamp is not None and (token - timestamp_begin) * time_precision < right_stride_start ): break last_timestamp = token current_tokens = [] current_token_timestamps = [] # - all tokens within output for i, token in enumerate(token_ids): # 4 possible states for each token # - 1/ Language code # - 2/ all other special tokens (which we ignore) # - 3/ Timestamp # - 4/ Regular text if token in all_special_ids: # Either language code or other text = tokenizer.decode([token]) # Removing outer shell <|XX|> text = text[2:-2] language = LANGUAGES.get(text) if language is not None: # 1/ Indeed some language # TODO Handle when language is different from the previous # one, and we cannot use timestamped tokens to create chunks if last_language and language != last_language and not return_timestamps: previous_tokens.append(current_tokens) resolved_tokens = _find_longest_common_sequence(previous_tokens) resolved_text = tokenizer.decode(resolved_tokens) chunk["text"] = resolved_text chunks.append(chunk) # Flush all our temporary context previous_tokens = [] current_tokens = [] chunk = new_chunk() chunk["language"] = language last_language = language else: # 2/ This is a regular special token, ignoring it pass elif token >= timestamp_begin: # 3/ Timestamp token timestamp = float((token - timestamp_begin) * time_precision) if timestamp < cur_max_timestamp: # next segment has started last_was_single_ending = i >= 2 and not ( token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin ) if last_was_single_ending: prev_segments_len += time_precision * segment_size else: cur_max_timestamp = penultimate_timestamp prev_segments_len += penultimate_timestamp penultimate_timestamp = cur_max_timestamp cur_max_timestamp = timestamp time = (token - timestamp_begin) * time_precision + time_offset + prev_segments_len time = round(time, 2) if last_timestamp and token >= last_timestamp: # Whisper outputted a timestamp token, but it falls within # our stride, so we're going to skip it for the time being # and resolve this later # Skip is necessary because timestamp tokens always come # by pair, so we need to skip the next one too (which would mark the start of another chunk). skip = True elif skip or (previous_tokens and token < first_timestamp): skip = False elif chunk["timestamp"][0] is None: chunk["timestamp"][0] = time else: # This is the end of the timestamp chunk if time == chunk["timestamp"][0]: # This is a bug in timestamp token output # where we're taking the duplicate token # as a stop where it should be a start. # This is an issue in the underlying model output # Let's just skip it so it becomes de-factor # a start again pass else: chunk["timestamp"][1] = time # Handling merges. 
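                        # An end timestamp closes the current chunk: the token runs accumulated from the
                        # (possibly overlapping) strides are reconciled with `_find_longest_common_sequence`,
                        # decoded to text and attached to the chunk, and the temporary buffers are flushed
                        # before a fresh chunk is started.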
previous_tokens.append(current_tokens) if return_timestamps == "word": previous_token_timestamps.append(current_token_timestamps) resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence( previous_tokens, previous_token_timestamps ) resolved_text = tokenizer.decode(resolved_tokens) chunk["text"] = resolved_text if return_timestamps == "word": chunk["words"] = _collate_word_timestamps( tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language ) chunks.append(chunk) # Flush all our temporary context previous_tokens = [] current_tokens = [] previous_token_timestamps = [] current_token_timestamps = [] chunk = new_chunk() else: # 4/ Regular token # We just append to the list of all tokens so we can handle # merges later and decode into text. current_tokens.append(token) if return_timestamps == "word": if i == 0: start_time = round(0.0 + time_offset, 2) else: start_time = round(token_timestamps[i - 1] + time_offset, 2) end_time = round(token_timestamps[i] + time_offset, 2) current_token_timestamps.append((start_time, end_time)) if "stride" in output: time_offset += chunk_len - stride_right # Leftover tokens if current_tokens: previous_tokens.append(current_tokens) if return_timestamps == "word": previous_token_timestamps.append(current_token_timestamps) elif not (any(p for p in previous_tokens)): chunk = new_chunk() previous_tokens = [] current_tokens = [] previous_token_timestamps = [] current_token_timestamps = [] if previous_tokens: if return_timestamps: logger.warning( "Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. " "Also make sure WhisperTimeStampLogitsProcessor was used during generation." ) # Happens when we don't use timestamps resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence( previous_tokens, previous_token_timestamps ) resolved_text = tokenizer.decode(resolved_tokens) chunk["text"] = resolved_text if return_timestamps == "word": chunk["words"] = _collate_word_timestamps( tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language ) chunks.append(chunk) # Preparing and cleaning up the pipeline output full_text = "".join(chunk["text"] for chunk in chunks) if return_timestamps or return_language: for chunk in chunks: if not return_timestamps: chunk.pop("timestamp") else: chunk["timestamp"] = tuple(chunk["timestamp"]) if not return_language: chunk.pop("language") if return_timestamps == "word": new_chunks = [] for chunk in chunks: new_chunks.extend(chunk["words"]) optional = {"chunks": new_chunks} else: optional = {"chunks": chunks} else: optional = {} return full_text, optional def _find_longest_common_sequence(sequences, token_timestamp_sequences=None): # It would be much harder to do O(n) because of fault tolerance. # We actually have a really good property which is that the total sequence # MUST be those subsequences in order. # If token_timestamp_sequences is provided, will split those sequences in # exactly the same way. 
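    # Worked example with hypothetical token ids: merging [1, 2, 3, 4] and [3, 4, 5].
    # The sliding comparison below scores the alignment left[2:4] == right[0:2] == [3, 4] highest;
    # the left sequence then contributes everything up to the middle of its matched span ([1, 2, 3])
    # and the right sequence contributes from the middle of its span onward ([4, 5]),
    # yielding the merged sequence [1, 2, 3, 4, 5].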
left_sequence = sequences[0] left_length = len(left_sequence) total_sequence = [] if token_timestamp_sequences: left_token_timestamp_sequence = token_timestamp_sequences[0] total_token_timestamp_sequence = [] for seq_idx, right_sequence in enumerate(sequences[1:]): # index = 0 max_ = 0.0 max_indices = (left_length, left_length, 0, 0) # Here we're sliding matches # [a, b, c, d] # [c, d, f] # = [c] == [d] # # [a, b, c, d] # [c, d, f] # = [c, d] == [c, d] # # # [a, b, c, d] # [c, d, f] # # = [b, c, d] == [c, d, f] # # [a, b, c, d] # [c, d, f] # # [a, b, c] == [c, d, f] # # [a, b, c, d] # [d, f] # # [a, b] == [d, f] # # [a, b, c, d] # [f] # # [a] == [f] right_length = len(right_sequence) for i in range(1, left_length + right_length): # epsilon to favor long perfect matches eps = i / 10000.0 # Slightly convoluted because we don't want out of bound indices # This will be necessary for a small conflict resolution optimization # later left_start = max(0, left_length - i) left_stop = min(left_length, left_length + right_length - i) left = np.array(left_sequence[left_start:left_stop]) right_start = max(0, i - left_length) right_stop = min(right_length, i) right = np.array(right_sequence[right_start:right_stop]) # We can only match subsequences of the same size. if len(left) != len(right): raise RuntimeError( "There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference." ) if token_timestamp_sequences: # Get length of longest subsequence of tokens that match # and have timestamps that are in order matches = sum( 1 for idx, elem in enumerate(left) if ( elem == right[idx] and left_token_timestamp_sequence[left_start + idx] <= token_timestamp_sequences[seq_idx + 1][right_start + idx] ) ) else: matches = np.sum(left == right) matching = matches / i + eps if matches > 1 and matching > max_: max_ = matching max_indices = (left_start, left_stop, right_start, right_stop) (left_start, left_stop, right_start, right_stop) = max_indices # This is a small conflict optimization since those sequences overlap # in audio. 
# We're going to give more confidence to the left sequence # for the left of the overlap, # and to the right of the sequence, for the right of the overlap left_mid = (left_stop + left_start) // 2 right_mid = (right_stop + right_start) // 2 total_sequence.extend(left_sequence[:left_mid]) left_sequence = right_sequence[right_mid:] left_length = len(left_sequence) if token_timestamp_sequences: total_token_timestamp_sequence.extend(left_token_timestamp_sequence[:left_mid]) left_token_timestamp_sequence = token_timestamp_sequences[seq_idx + 1][right_mid:] total_sequence.extend(left_sequence) if token_timestamp_sequences is None: return total_sequence if len(token_timestamp_sequences) > 0: total_token_timestamp_sequence.extend(left_token_timestamp_sequence) return total_sequence, total_token_timestamp_sequence else: return total_sequence, [] def _collate_word_timestamps(tokenizer, tokens, token_timestamps, language, return_language): words, _, token_indices = _combine_tokens_into_words(tokenizer, tokens, language) optional_language_field = {"language": language} if return_language else {} timings = [ { "text": word, "timestamp": (token_timestamps[indices[0]][0], token_timestamps[indices[-1]][1]), **optional_language_field, } for word, indices in zip(words, token_indices) ] return timings def _combine_tokens_into_words( tokenizer, tokens: list[int], language: Optional[str] = None, prepend_punctuations: str = "\"'“¡¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", ): """ Groups tokens by word. Returns a tuple containing a list of strings with the words, and a list of `token_id` sequences with the tokens making up each word. """ if language is None: language = tokenizer.language if language is None: language = "english" if language in {"chinese", "japanese", "thai", "lao", "myanmar", "cantonese"}: # These languages don't typically use spaces. 
words, word_tokens, token_indices = _split_tokens_on_unicode(tokenizer, tokens) else: words, word_tokens, token_indices = _split_tokens_on_spaces(tokenizer, tokens) _merge_punctuations(words, word_tokens, token_indices, prepend_punctuations, append_punctuations) return words, word_tokens, token_indices def _split_tokens_on_unicode(tokenizer, tokens: list[int]): """Combine tokens into words by splitting at any position where the tokens are decoded as valid unicode points.""" decoded_full = tokenizer.decode(tokens, decode_with_timestamps=True) replacement_char = "\ufffd" words = [] word_tokens = [] token_indices = [] current_tokens = [] current_indices = [] unicode_offset = 0 for token_idx, token in enumerate(tokens): current_tokens.append(token) current_indices.append(token_idx) decoded = tokenizer.decode(current_tokens, decode_with_timestamps=True) if ( replacement_char not in decoded or decoded_full[unicode_offset + decoded.index(replacement_char)] == replacement_char ): words.append(decoded) word_tokens.append(current_tokens) token_indices.append(current_indices) current_tokens = [] current_indices = [] unicode_offset += len(decoded) return words, word_tokens, token_indices def _split_tokens_on_spaces(tokenizer, tokens: list[int]): """Combine tokens into words by splitting at whitespace and punctuation tokens.""" subwords, subword_tokens_list, subword_indices_list = _split_tokens_on_unicode(tokenizer, tokens) words = [] word_tokens = [] token_indices = [] for subword, subword_tokens, subword_indices in zip(subwords, subword_tokens_list, subword_indices_list): special = subword_tokens[0] >= tokenizer.eos_token_id with_space = subword.startswith(" ") punctuation = subword.strip() in "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" if special or with_space or punctuation or len(words) == 0: words.append(subword) word_tokens.append(subword_tokens) token_indices.append(subword_indices) else: words[-1] = words[-1] + subword word_tokens[-1].extend(subword_tokens) token_indices[-1].extend(subword_indices) return words, word_tokens, token_indices def _merge_punctuations(words, tokens, indices, prepended, appended): """Merges punctuation tokens with neighboring words.""" # prepend punctuations i = len(words) - 2 j = len(words) - 1 while i >= 0: if words[i].startswith(" ") and words[i].strip() in prepended: words[j] = words[i] + words[j] tokens[j] = tokens[i] + tokens[j] indices[j] = indices[i] + indices[j] words[i] = "" tokens[i] = [] indices[i] = [] else: j = i i -= 1 # append punctuations i = 0 j = 1 while j < len(words): if not words[i].endswith(" ") and words[j] in appended: words[i] += words[j] tokens[i] += tokens[j] indices[i] += indices[j] words[j] = "" tokens[j] = [] indices[j] = [] else: i = j j += 1 # remove elements that are now empty words[:] = [word for word in words if word] tokens[:] = [token for token in tokens if token] indices[:] = [idx for idx in indices if idx] __all__ = ["WhisperTokenizer"]
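# ----------------------------------------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the module): how the prompt helpers defined above fit together.
# "openai/whisper-tiny" is only used as a convenient public checkpoint.
#
#   from transformers import WhisperTokenizer
#
#   tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny")
#   prompt_ids = tokenizer.get_prompt_ids("Mr. Quilter", return_tensors="np")  # begins with <|startofprev|>
#
# When decoding ids that start with such a prompt, `decode(..., skip_special_tokens=True)` calls
# `_strip_prompt`, which drops everything before <|startoftranscript|>, so the conditioning text never
# leaks into the returned transcription.
# ----------------------------------------------------------------------------------------------------------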
# coding=utf-8 # Copyright 2021 The Fairseq Authors The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch XGLM model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_xglm import XGLMConfig logger = logging.get_logger(__name__) # Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->XGLM class XGLMScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class XGLMSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward put the weights on the correct dtype and device of the param emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device) self.register_buffer("weights", emb_weights, persistent=False) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". 
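        Concretely, for position `pos` and channel `j` (with `half_dim = embedding_dim // 2`), the first
        `half_dim` channels hold `sin(pos * 10000**(-j / (half_dim - 1)))` and the remaining channels hold the
        matching `cos` terms; an odd `embedding_dim` is zero-padded with one extra channel.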
""" half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, position_ids: Optional[torch.Tensor] = None, past_key_values_length: int = 0): bsz, seq_len = position_ids.size() position_ids += self.offset # Expand embeddings if needed. `position_ids.max()` is NOT used to keep torch.fx compatibility. max_pos = 2 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() class XGLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: Optional[float] = 0.0, is_decoder: Optional[bool] = False, bias: Optional[bool] = True, layer_idx: Optional[bool] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() src_len = key_value_states.shape[1] if is_cross_attention else tgt_len # get query proj query_states = self.q_proj(hidden_states) * self.scaling if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values 
else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(bsz, src_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, src_len, -1, self.head_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device) ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 if attn_weights.dtype == torch.float16: attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class XGLMDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: XGLMConfig, layer_idx=None): super().__init__() self.embed_dim = config.d_model self.self_attn = XGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout if config.add_cross_attention: self.encoder_attn = XGLMAttention( embed_dim=self.embed_dim, num_heads=config.attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. 
past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class XGLMPreTrainedModel(PreTrainedModel): config: XGLMConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["XGLMDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @auto_docstring class XGLMModel(XGLMPreTrainedModel): def __init__(self, config: XGLMConfig, embed_tokens: Optional[nn.Embedding] = None): r""" embed_tokens (`nn.Embedding`, *optional*): output embeddings """ super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = XGLMScaledWordEmbedding( config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale ) self.embed_positions = XGLMSinusoidalPositionalEmbedding( config.max_position_embeddings, config.d_model, config.pad_token_id, ) self.layers = nn.ModuleList([XGLMDecoderLayer(config, layer_idx=i) for i in range(config.num_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: 
Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient checkpointing`. Setting `use_cache = False`..." ) use_cache = False # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(), DynamicCache()) if encoder_hidden_states is not None else DynamicCache() ) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." 
) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) if position_ids is None: position_ids = torch.arange( past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=input_ids.device if input_ids is not None else inputs_embeds.device, ) position_ids = position_ids.unsqueeze(0) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) hidden_states = inputs_embeds + self.embed_positions(position_ids, past_key_values_length).to( inputs_embeds.device ) hidden_states = nn.functional.dropout(hidden_states, p=float(self.dropout), training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {attn_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring( custom_intro=""" The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
""" ) class XGLMForCausalLM(XGLMPreTrainedModel, GenerationMixin): base_model_prefix = "model" _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = XGLMModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, pad_token_id=self.config.pad_token_id, **kwargs, ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) __all__ = ["XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel"]
transformers/src/transformers/models/xglm/modeling_xglm.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert X-MOD checkpoint.""" import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("0.12.2"): raise Exception("requires fairseq >= 0.12.2") if version.parse(fairseq.__version__) > version.parse("2"): raise Exception("requires fairseq < v2") logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_TEXT = "Hello, World!" SAMPLE_LANGUAGE = "en_XX" def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool ): data_dir = Path("data_bin") xmod = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"), ) xmod.eval() # disable dropout print(xmod) xmod_sent_encoder = xmod.model.encoder.sentence_encoder config = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, # PyTorch default used in fairseq pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, ) if classification_head: config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our X-MOD config:", config) model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config) model.eval() # Now let's copy all the weights. # Embeddings model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. 
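# embedding-block LayerNorm (fairseq stores this as `layernorm_embedding` on the sentence encoder)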
model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers): # Encoder: start of layer layer = model.roberta.encoder.layer[i] xmod_layer = xmod_sent_encoder.layers[i] # self attention self_attn = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)) ): raise AssertionError("Dimensions of self-attention weights do not match.") self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias # self-attention output self_output = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError("Dimensions of self-attention output weights do not match.") self_output.dense.weight = xmod_layer.self_attn.out_proj.weight self_output.dense.bias = xmod_layer.self_attn.out_proj.bias self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias # intermediate intermediate = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape: raise AssertionError("Dimensions of intermediate weights do not match.") intermediate.dense.weight = xmod_layer.fc1.weight intermediate.dense.bias = xmod_layer.fc1.bias # output bert_output = layer.output if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape: raise AssertionError("Dimensions of feed-forward weights do not match.") bert_output.dense.weight = xmod_layer.fc2.weight bert_output.dense.bias = xmod_layer.fc2.bias bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()): raise AssertionError("Lists of language adapters do not match.") for lang_code in xmod_layer.adapter_modules: to_adapter = bert_output.adapter_modules[lang_code] from_adapter = xmod_layer.adapter_modules[lang_code] to_adapter.dense1.weight = from_adapter.fc1.weight to_adapter.dense1.bias = from_adapter.fc1.bias to_adapter.dense2.weight = from_adapter.fc2.weight to_adapter.dense2.bias = from_adapter.fc2.bias # end of layer if xmod_sent_encoder.layer_norm is not None: model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias if classification_head: model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias else: # LM Head model.lm_head.dense.weight = 
xmod.model.encoder.lm_head.dense.weight model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1 model.roberta.set_default_language(SAMPLE_LANGUAGE) our_output = model(input_ids)[0] if classification_head: their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids)) else: their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0] print(our_output.shape, their_output.shape) max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7 success = torch.allclose(our_output, their_output, atol=1e-3) print("Do both models output the same tensors?", "🔥" if success else "💩") if not success: raise Exception("Something went wRoNg") Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) args = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
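# Example invocation (the paths below are placeholders; as loaded above, the script also
# expects `sentencepiece.bpe.model` next to the checkpoint and a `data_bin/dict.txt`
# dictionary):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#         --pytorch_dump_folder_path /path/to/output
#
# Add `--classification_head` when the checkpoint was fine-tuned with an "mnli"
# classification head.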
transformers/src/transformers/models/xmod/convert_xmod_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2024 Zyphra Technologies and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Zamba model.""" import math from typing import Any, Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available from .configuration_zamba import ZambaConfig if is_mamba_ssm_available(): from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, selective_scan_fn from mamba_ssm.ops.triton.selective_state_update import selective_state_update else: selective_state_update, selective_scan_fn, mamba_inner_fn = None, None, None if is_causal_conv1d_available(): from causal_conv1d import causal_conv1d_fn, causal_conv1d_update else: causal_conv1d_update, causal_conv1d_fn = None, None is_fast_path_available = all( (selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn) ) logger = logging.get_logger(__name__) # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Zamba class ZambaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ ZambaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class ZambaHybridDynamicCache: """ A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache (which has a constant shape regardless of seq_len). This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states` and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor is as follows. For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`, while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors). For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors), while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`, and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`. """ is_compileable = False def __init__(self, config, batch_size, dtype=torch.float16, device=None): self.dtype = dtype self.is_compileable = False self.layers_block_type = config.layers_block_type self.has_previous_state = False # only used by mamba self.intermediate_size = config.mamba_expand * config.hidden_size self.ssm_state_size = config.mamba_d_state self.conv_kernel_size = config.mamba_d_conv self.n_mamba_heads = config.n_mamba_heads self.conv_states = [] self.ssm_states = [] self.transformer_layers = [] self._modules = {} self._parameters = {} self._buffers = {} for i in range(config.num_hidden_layers): self.conv_states += [ torch.zeros(batch_size, self.intermediate_size, self.conv_kernel_size, device=device, dtype=dtype) ] cache_shape = ( batch_size, self.n_mamba_heads, self.intermediate_size // self.n_mamba_heads, self.ssm_state_size, ) self.ssm_states += [torch.zeros(cache_shape, device=device, dtype=dtype)] if self.layers_block_type[i] == "hybrid": self.transformer_layers.append(i) self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)] def __len__(self): return len(self.key_cache) def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]: return self.key_cache[layer_idx], self.value_cache[layer_idx] # Copied from transformers.models.jamba.modeling_jamba.HybridMambaAttentionDynamicCache.update def update( self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]] = None, ) -> tuple[torch.Tensor, torch.Tensor]: # Update the cache if self.key_cache[layer_idx].shape[-1] == 0: self.key_cache[layer_idx] = key_states self.value_cache[layer_idx] = value_states else: self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2) self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2) return self.key_cache[layer_idx], self.value_cache[layer_idx] # Copied from transformers.models.jamba.modeling_jamba.HybridMambaAttentionDynamicCache.reorder_cache def reorder_cache(self, beam_idx:
torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" for layer_idx in range(len(self.key_cache)): device = self.key_cache[layer_idx].device self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.value_cache[layer_idx].device self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device)) device = self.conv_states[layer_idx].device self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device)) device = self.ssm_states[layer_idx].device self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device)) # Copied from transformers.models.jamba.modeling_jamba.HybridMambaAttentionDynamicCache.get_seq_length def get_seq_length(self, layer_idx: Optional[int] = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" # take any layer that contains cache and not empty tensor layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx if len(self.key_cache) <= layer_idx: return 0 return self.key_cache[layer_idx].shape[-2] def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class ZambaAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer and "Generating Long Sequences with Sparse Transformers". Adapted from transformers.models.mistral.modeling_mistral.MistralAttention: The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads. The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer (see fig. 2 in https://huggingface.co/papers/2405.16712). 
Additionally, replaced attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2) """ def __init__(self, config: ZambaConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.attention_hidden_size = config.attention_hidden_size self.head_dim = config.attention_head_dim self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.scaling = (self.head_dim / 2) ** -0.5 self.is_causal = True self.attention_dropout = config.attention_dropout self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor], past_key_values: Optional[ZambaHybridDynamicCache] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) if past_key_values is not None: key_states, value_states = past_key_values.update(key_states, value_states, layer_idx) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class ZambaMambaMixer(nn.Module): """ Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`. A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective) ∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4, and is why Mamba is called **selective** state spaces) This module differs from `transformers.models.mamba.modeling_mamba.MambaMixer` in the following way: - Added multi-head: the output of `self.in_proj` is split into `self.n_mamba_heads` heads, and each head undergoes an independent forward pass, identical to the original `MambaMixer`, up until the pre-activations of `self.out_proj`. The pre-activations, coming from different mamba heads, are then concatenated and fed into `self.out_proj`.
""" def __init__(self, config: ZambaConfig, layer_idx): super().__init__() self.config = config self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.ssm_state_size = config.mamba_d_state self.conv_kernel_size = config.mamba_d_conv self.intermediate_size = config.mamba_expand * config.hidden_size self.time_step_rank = config.mamba_dt_rank self.n_mamba_heads = config.n_mamba_heads self.mamba_head_dim = self.intermediate_size // self.n_mamba_heads self.use_conv_bias = config.mamba_conv_bias self.use_bias = config.mamba_proj_bias self.conv1d = nn.Conv1d( in_channels=self.intermediate_size, out_channels=self.intermediate_size, bias=self.use_conv_bias, kernel_size=self.conv_kernel_size, groups=self.intermediate_size, padding=self.conv_kernel_size - 1, ) self.activation = config.hidden_mamba_act self.act = ACT2FN[config.hidden_mamba_act] self.use_fast_kernels = config.use_mamba_kernels # projection of the input hidden states self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias) # weight associated to the selective projection used to make dt, B and C input dependent # each mamba head is processed independently self.x_proj_weight = nn.Parameter( torch.zeros( self.n_mamba_heads, self.time_step_rank + self.ssm_state_size * 2, self.mamba_head_dim, ) ) # time step projection (discretization) self.dt_proj_weight = nn.Parameter( (torch.zeros(self.n_mamba_heads, self.mamba_head_dim, self.time_step_rank) - 0.5) * 2 / self.time_step_rank**0.5 ) self.dt_proj_bias = nn.Parameter(torch.zeros(self.n_mamba_heads, self.mamba_head_dim)) # S4D real initialization. These are not discretized! # The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :] A = A.expand(self.intermediate_size, -1).contiguous() self.A_log = nn.Parameter(torch.log(A).reshape(self.n_mamba_heads, self.mamba_head_dim, -1)) self.D = nn.Parameter(torch.ones(self.n_mamba_heads, self.mamba_head_dim)) self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias) if not is_fast_path_available: logger.warning_once( "The fast path is not available because on of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`" " is None. To install follow https://github.com/state-spaces/mamba/#installation and" " https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config" ) def cuda_kernels_forward( self, hidden_states: torch.Tensor, cache_params: ZambaHybridDynamicCache = None, attention_mask=None ): batch_size, seq_len, _ = hidden_states.shape use_precomputed_states = cache_params is not None and cache_params.has_previous_state and seq_len == 1 # 1. Gated linear projection projected_states = self.in_proj(hidden_states).transpose(1, 2) hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2) hidden_states = hidden_states.squeeze(2).contiguous() gate = gate.squeeze(2) gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1) # 2. 
Convolution sequence transformation conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) if use_precomputed_states: hidden_states = causal_conv1d_update( hidden_states.squeeze(-1), cache_params.conv_states[self.layer_idx], conv_weights, self.conv1d.bias, self.activation, ) hidden_states = hidden_states.unsqueeze(-1) else: if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask.unsqueeze(1) if cache_params is not None: conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) cache_params.conv_states[self.layer_idx].copy_(conv_states) hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation) if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask.unsqueeze(1) # 3. SSM sequence transformation # 3.a. input varying initialization of time_step, B and C hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1) ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) discrete_time_step = self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2) A = -torch.exp(self.A_log.float()) # 3.c perform the recurrence y ← SSM(A, B, C)(x) time_proj_bias = self.dt_proj_bias.float() if self.dt_proj_bias is not None else None scan_outputs = torch.empty((batch_size, 0, seq_len), device=hidden_states.device, dtype=hidden_states.dtype) if use_precomputed_states: for n in range(self.n_mamba_heads): scan_outputs_ = selective_state_update( cache_params.ssm_states[self.layer_idx][:, n], hidden_states[n, ..., 0], discrete_time_step[n, ..., 0], A[n], B[n, :, 0], C[n, :, 0], self.D[n], gate[n, ..., 0], time_proj_bias[n], dt_softplus=True, ).unsqueeze(-1) scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1) else: ssm_state = torch.empty( (batch_size, 0, self.mamba_head_dim, self.ssm_state_size), device=hidden_states.device, dtype=hidden_states.dtype, ) for n in range(self.n_mamba_heads): scan_outputs_, ssm_state_ = selective_scan_fn( hidden_states[n], discrete_time_step[n], A[n], B[n].transpose(1, 2), C[n].transpose(1, 2), self.D[n].float(), gate[n], time_proj_bias[n], delta_softplus=True, return_last_state=True, ) scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1).contiguous() ssm_state = torch.cat((ssm_state, ssm_state_.unsqueeze(1)), dim=1) if ssm_state is not None and cache_params is not None: cache_params.ssm_states[self.layer_idx].copy_(ssm_state) # 4. Final linear projection contextualized_states = self.out_proj(scan_outputs.transpose(1, 2)) return contextualized_states def slow_forward(self, input_states, cache_params: ZambaHybridDynamicCache = None, attention_mask=None): batch_size, seq_len, _ = input_states.shape dtype = input_states.dtype # 1. Gated linear projection projected_states = self.in_proj(input_states).transpose(1, 2) hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2) hidden_states = hidden_states.squeeze(2).contiguous() gate = gate.squeeze(2) gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1) use_cache = isinstance(cache_params, ZambaHybridDynamicCache) # 2. 
Convolution sequence transformation if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size: if self.training: # In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass ssm_state = cache_params.ssm_states[self.layer_idx].clone() else: ssm_state = cache_params.ssm_states[self.layer_idx] ssm_state = ssm_state.to(hidden_states.device) if ( cache_params.has_previous_state and seq_len == 1 and cache_params.conv_states[self.layer_idx].shape[0] == batch_size ): conv_state = cache_params.conv_states[self.layer_idx] conv_state = torch.roll(conv_state, shifts=-1, dims=-1) conv_state[:, :, -1] = hidden_states[:, :, 0] cache_params.conv_states[self.layer_idx] = conv_state hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1) if self.use_conv_bias: hidden_states += self.conv1d.bias hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1) else: if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1) conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)) cache_params.conv_states[self.layer_idx] = conv_state hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1) else: ssm_state = torch.zeros( (batch_size, self.n_mamba_heads, self.mamba_head_dim, self.ssm_state_size), device=hidden_states.device, dtype=dtype, ) if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask.unsqueeze(1) hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) if attention_mask is not None and not torch.all(attention_mask == 1): hidden_states = hidden_states * attention_mask.unsqueeze(1) # 3. State Space Model sequence transformation # 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2] hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1) ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2) time_step, B, C = torch.split( ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1 ) discrete_time_step = (self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)) + self.dt_proj_bias[ :, None, :, None ] discrete_time_step = nn.functional.softplus(discrete_time_step) # 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM) A = -torch.exp(self.A_log.float()) discrete_A = torch.exp(A[:, None, :, None, :] * discrete_time_step[:, :, :, :, None]) discrete_B = discrete_time_step[:, :, :, :, None] * B[:, :, None, :, :].float() deltaB_u = discrete_B * hidden_states[:, :, :, :, None].float() # 3.c perform the recurrence y ← SSM(A, B, C)(x) scan_outputs = [] for i in range(seq_len): ssm_state = discrete_A[:, :, :, i, :].transpose(0, 1) * ssm_state + deltaB_u[:, :, :, i, :].transpose(0, 1) scan_output = torch.matmul(ssm_state.transpose(0, 1).to(dtype), C[:, :, i, :].unsqueeze(-1)) scan_outputs.append(scan_output[:, :, :, 0]) scan_output = torch.stack(scan_outputs, dim=-1) scan_output = scan_output + (hidden_states * self.D[:, None, :, None]) scan_output = scan_output * self.act(gate) if use_cache: cache_params.ssm_states[self.layer_idx] = ssm_state # 4. 
Final linear projection contextualized_states = self.out_proj( scan_output.transpose(0, 1).reshape(batch_size, -1, seq_len).transpose(1, 2) ) return contextualized_states def forward(self, hidden_states, cache_params: ZambaHybridDynamicCache = None, attention_mask=None): if self.use_fast_kernels: if not is_fast_path_available or "cuda" not in self.x_proj_weight.device.type: raise ValueError( "Fast Mamba kernels are not available. Make sure they are installed and that " "the mamba module is on a CUDA device. Please run 'pip install causal-conv1d>=1.2.0' " "and 'pip install mamba-ssm', or set use_mamba_kernels=False in the model's config." ) return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask=attention_mask) return self.slow_forward(hidden_states, cache_params, attention_mask=attention_mask) # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Zamba class ZambaMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class ZambaAttentionDecoderLayer(nn.Module): def __init__(self, config: ZambaConfig, layer_idx: Optional[int] = None): super().__init__() self.self_attn = ZambaAttention(config, layer_idx) self.feed_forward = ZambaMLP(config) self.input_layernorm = ZambaRMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps) self.pre_ff_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`. This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The concatenated tensor is then used as input of the pre-attention RMSNorm (see fig. 2 in https://huggingface.co/papers/2405.16712). layer_idx (`int`): layer_idx in the forward pass. Used to distinguish Zamba's tied transformer layers. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1) hidden_states = self.input_layernorm(hidden_states) hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, **kwargs, ) # feed-forward (MLP) hidden_states = self.pre_ff_layernorm(hidden_states) hidden_states = self.feed_forward(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs class ZambaMambaDecoderLayer(nn.Module): def __init__(self, config: ZambaConfig, layer_idx: int): super().__init__() self.mamba = ZambaMambaMixer(config=config, layer_idx=layer_idx) self.input_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.layer_idx = layer_idx @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, transformer_hidden_states: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. """ residual = hidden_states # `transformer_hidden_states` is the output from shared transformer + linear layer (see fig. 2 in https://huggingface.co/papers/2405.16712). # `transformer_hidden_states` is then added to the input to the mamba layer below (as described in eq. (6) of https://huggingface.co/papers/2405.16712). 
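# schematically: mamba_input = hidden_states + linear(shared_attention_output), where the
# linear projection has already been applied in `ZambaHybridLayer` before this layer is called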
hidden_states = ( hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states ) hidden_states = self.input_layernorm(hidden_states) hidden_states = self.mamba( hidden_states=hidden_states, cache_params=past_key_values, attention_mask=attention_mask, ) self_attn_weights = None # residual connection after mamba hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (past_key_values,) return outputs class ZambaHybridLayer(nn.Module): def __init__(self, shared_transf: ZambaAttentionDecoderLayer, linear: nn.Linear, mamba: ZambaMambaDecoderLayer): super().__init__() self.shared_transf = shared_transf self.linear = linear self.mamba_decoder = mamba @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor] = None, layer_idx: Optional[int] = None, attention_mask: Optional[torch.Tensor] = None, causal_mask: Optional[torch.Tensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer. layer_idx (`int`): layer number. attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, sequence_length)` where padding elements are indicated by 0. past_key_values (`ZambaHybridDynamicCache`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. 
""" layer_outputs = self.shared_transf( hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) transformer_hidden_states = layer_outputs[0] if output_attentions: self_attn_weights = layer_outputs[1] transformer_hidden_states = self.linear(transformer_hidden_states) layer_outputs = self.mamba_decoder( hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) if output_attentions: layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:] return layer_outputs @auto_docstring class ZambaPreTrainedModel(PreTrainedModel): config: ZambaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["ZambaAttentionDecoderLayer", "ZambaMambaDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = False _supports_sdpa = False # Note: only supports ZambaHybridDynamicCache _is_stateful = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, ZambaRMSNorm): module.weight.data.fill_(1.0) elif isinstance(module, ZambaMambaMixer): module.x_proj_weight.data.normal_(mean=0.0, std=std) dt_init_std = self.config.mamba_dt_rank**-0.5 nn.init.uniform_(module.dt_proj_weight, -dt_init_std, dt_init_std) mamba_head_dim = self.config.mamba_expand * self.config.hidden_size // self.config.n_mamba_heads dt = torch.exp( torch.rand(self.config.n_mamba_heads, mamba_head_dim) * (math.log(self.config.time_step_max) - math.log(self.config.time_step_min)) + math.log(self.config.time_step_min) ).clamp(min=self.config.time_step_floor) # # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 inv_dt = dt + torch.log(-torch.expm1(-dt)) module.dt_proj_bias.data.copy_(inv_dt) A = torch.arange(1, module.ssm_state_size + 1, dtype=torch.float32)[None, :] A = A.expand(module.intermediate_size, -1).contiguous() module.A_log.data.copy_(torch.log(A).reshape(module.n_mamba_heads, module.mamba_head_dim, -1)) module.D.data.fill_(1.0) @auto_docstring class ZambaModel(ZambaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`ZambaDecoderLayer`] Args: config: ZambaConfig """ def __init__(self, config: ZambaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) block = ZambaAttentionDecoderLayer(config) mamba_layers = [] linear_layers = [] self.layers_block_type = config.layers_block_type for i in range(config.num_hidden_layers): if config.layers_block_type[i] == "mamba": mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i)) elif config.layers_block_type[i] == "hybrid": linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False)) mamba_layers.append(ZambaMambaDecoderLayer(config, layer_idx=i)) mamba_layers = iter(mamba_layers) linear_layers = iter(linear_layers) layers = [] self._tied_weights_keys = [] for layer_id, layer_type in enumerate(self.layers_block_type): if layer_type == "hybrid": prefix_name = f"layers.{layer_id}." tied_keys = [ "shared_transf.self_attn.q_proj.weight", "shared_transf.self_attn.k_proj.weight", "shared_transf.self_attn.v_proj.weight", "shared_transf.self_attn.o_proj.weight", "shared_transf.feed_forward.gate_proj.weight", "shared_transf.feed_forward.up_proj.weight", "shared_transf.feed_forward.down_proj.weight", "shared_transf.input_layernorm.weight", "shared_transf.pre_ff_layernorm.weight", ] self._tied_weights_keys = [*self._tied_weights_keys, *[prefix_name + key for key in tied_keys]] layers.append(ZambaHybridLayer(block, next(linear_layers), next(mamba_layers))) else: layers.append(next(mamba_layers)) self.layers = nn.ModuleList(layers) self._attn_implementation = config._attn_implementation self.final_layernorm = ZambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" ) if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds original_hidden_states = torch.clone(inputs_embeds) # original_hidden_states: word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer if use_cache and past_key_values is None: logger.warning_once( "Zamba requires an initialized `ZambaHybridDynamicCache` to return a cache. None was " "provided, so no cache will be returned." ) if cache_position is None: cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for layer_idx, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer.__call__, hidden_states, original_hidden_states, layer_idx, attention_mask, causal_mask, past_key_values, output_attentions, use_cache, cache_position, ) else: layer_outputs = layer( hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, causal_mask=causal_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: if layer_outputs[1] is not None: # append attentions only of attention layers. Mamba layers return `None` as the attention weights all_self_attns += (layer_outputs[1],) hidden_states = self.final_layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if past_key_values and not past_key_values.has_previous_state: past_key_values.has_previous_state = True output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attns, ) return output if return_dict else output.to_tuple() # Copied from transformers.models.jamba.modeling_jamba.JambaModel._update_causal_mask def _update_causal_mask(self, attention_mask, input_tensor, cache_position): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and 0.0 in attention_mask: return attention_mask return None dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] target_length = cache_position[-1] + 1 causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit if attention_mask.dim() == 2: mask_length = attention_mask.shape[-1] padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0) causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) if ( self.config._attn_implementation == "sdpa" and 
attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask # Adapted from transformers.models.jamba.modeling_jamba.JambaForCausalLM with Jamba->Zamba, JAMBA->ZAMBA class ZambaForCausalLM(ZambaPreTrainedModel, GenerationMixin): def __init__(self, config: ZambaConfig): super().__init__(config) self.model = ZambaModel(config) self._tied_weights_keys = ["lm_head.weight", *self.model._tied_weights_keys] self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[ZambaHybridDynamicCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> Union[tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, ZambaForCausalLM >>> model = ZambaForCausalLM.from_pretrained("Zyphra/Zamba-7B-v1") >>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba-7B-v1") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=return_dict, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.vocab_size, **kwargs) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, position_ids=None, use_cache=True, **kwargs, ): # Overwritten -- has a unique cache type, `ZambaHybridDynamicCache` empty_past_kv = past_key_values is None # Omit tokens covered by past_key_values if not empty_past_kv: # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens # Exception 1: when passing input_embeds, input_ids may be missing entries # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here # Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case. # (we can't check exception 3 while compiling) if ( inputs_embeds is not None # Exception 1 or cache_position[-1] >= input_ids.shape[1] # Exception 3 ): input_ids = input_ids[:, -cache_position.shape[0] :] elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] else: past_key_values = ZambaHybridDynamicCache( self.config, input_ids.shape[0], dtype=self.dtype, device=self.device ) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if not empty_past_kv: position_ids = position_ids[:, -input_ids.shape[1] :] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and empty_past_kv: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases model_inputs.update( { "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "attention_mask": attention_mask, "logits_to_keep": self.config.num_logits_to_keep, "cache_position": cache_position, } ) return model_inputs @auto_docstring( custom_intro=""" The Zamba Model with a sequence classification head on top (linear layer). 
[`ZambaForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires knowing the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class ZambaForSequenceClassification(ZambaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.model = ZambaModel(config) self._tied_weights_keys = self.model._tied_weights_keys self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds`." ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = ["ZambaForCausalLM", "ZambaForSequenceClassification", "ZambaModel", "ZambaPreTrainedModel"]
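# ---------------------------------------------------------------------------
# Illustrative sketch: a minimal, dependency-free rendering of how
# `ZambaModel.__init__` interleaves layers. Every "hybrid" entry reuses the
# *same* shared attention block (which is why its weights appear under
# `_tied_weights_keys`), paired with a fresh linear projection and a fresh
# mamba layer, while plain "mamba" entries stand alone. The
# `layers_block_type` value below is hypothetical, not from a real checkpoint.
if __name__ == "__main__":
    layers_block_type = ["mamba", "hybrid", "mamba", "hybrid"]  # hypothetical
    shared_block = object()  # stands in for the single ZambaAttentionDecoderLayer
    plan = []
    for layer_id, layer_type in enumerate(layers_block_type):
        if layer_type == "hybrid":
            # one shared attention block + per-layer linear + per-layer mamba
            plan.append(("hybrid", id(shared_block), f"linear_{layer_id}", f"mamba_{layer_id}"))
        else:
            plan.append(("mamba", f"mamba_{layer_id}"))
    for entry in plan:
        print(entry)
    # Both "hybrid" rows print the same id(shared_block): the attention
    # weights are shared across all hybrid layers.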
transformers/src/transformers/models/zamba/modeling_zamba.py/0
{ "file_path": "transformers/src/transformers/models/zamba/modeling_zamba.py", "repo_id": "transformers", "token_count": 27806 }
531
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ctypes import c_float, sizeof from enum import Enum from typing import TYPE_CHECKING, Optional, Union if TYPE_CHECKING: from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore class ParameterFormat(Enum): Float = c_float @property def size(self) -> int: """ Number of bytes required for this data type Returns: Integer > 0 """ return sizeof(self.value) def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int: """ Compute the effective size of an axis for the ONNX export. Args: dimension: Size of the axis; `<= 0` indicates a dynamic axis. fixed_dimension: Fallback size to use when the axis is dynamic. num_token_to_add: Number of special tokens to subtract from the resulting size. Returns: The effective size of the axis. """ # < 0 is possible if using a dynamic axis if dimension <= 0: dimension = fixed_dimension dimension -= num_token_to_add return dimension def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int: """ Compute the size taken by all the parameters in the given storage format when serializing the model Args: num_parameters: Number of parameters to be saved dtype: The data format in which each parameter will be saved Returns: Size (in bytes) taken to save all the parameters """ return num_parameters * dtype.size def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: """ Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. Args: model_name (`str`): Name of the model for which a preprocessor is loaded. Returns: `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns `None` if no preprocessor is found. """ # Avoid circular imports by only importing this here. from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore try: return AutoProcessor.from_pretrained(model_name) except (ValueError, OSError, KeyError): tokenizer, feature_extractor = None, None try: tokenizer = AutoTokenizer.from_pretrained(model_name) except (OSError, KeyError): pass try: feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) except (OSError, KeyError): pass if tokenizer is not None and feature_extractor is not None: raise ValueError( f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." ) elif tokenizer is None and feature_extractor is None: return None elif tokenizer is not None: return tokenizer else: return feature_extractor
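# ---------------------------------------------------------------------------
# Illustrative sketch: minimal usage of the helpers above with hypothetical
# sizes. A dynamic axis (dimension <= 0) falls back to the fixed dimension,
# and the serialized size is simply the parameter count times the per-element
# byte width of the chosen format.
if __name__ == "__main__":
    # dynamic axis: -1 is replaced by the fixed fallback dimension
    assert compute_effective_axis_dimension(-1, fixed_dimension=2) == 2
    # special tokens that will be added are subtracted from the budget
    assert compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=2) == 6
    # static axis with no tokens to add: returned unchanged
    assert compute_effective_axis_dimension(4, fixed_dimension=8) == 4
    # 1M float32 parameters -> 4 MB on disk
    assert compute_serialized_parameters_size(1_000_000, ParameterFormat.Float) == 4_000_000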
transformers/src/transformers/onnx/utils.py/0
{ "file_path": "transformers/src/transformers/onnx/utils.py", "repo_id": "transformers", "token_count": 1291 }
532
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Union, overload import numpy as np from ..utils import ( add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ImageToImagePipeline(Pipeline): """ Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous image input. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import pipeline >>> upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64") >>> img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) >>> img = img.resize((64, 64)) >>> upscaled_img = upscaler(img) >>> img.size (64, 64) >>> upscaled_img.size (144, 144) ``` This image to image pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-to-image"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-to-image). """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES) def _sanitize_parameters(self, **kwargs): preprocess_params = {} postprocess_params = {} forward_params = {} if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] if "head_mask" in kwargs: forward_params["head_mask"] = kwargs["head_mask"] return preprocess_params, forward_params, postprocess_params @overload def __call__(self, images: Union[str, "Image.Image"], **kwargs: Any) -> "Image.Image": ... @overload def __call__(self, images: Union[list[str], list["Image.Image"]], **kwargs: Any) -> list["Image.Image"]: ... def __call__( self, images: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any ) -> Union["Image.Image", list["Image.Image"]]: """ Transform the image(s) passed as inputs. Args: images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a list. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web.
If None, no timeout is used and the call may block forever. Return: An image (Image.Image) or a list of images (list["Image.Image"]) containing result(s). If the input is a single image, the return will be also a single image, if the input is a list of several images, it will return a list of transformed images. """ return super().__call__(images, **kwargs) def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def preprocess(self, image, timeout=None): image = load_image(image, timeout=timeout) inputs = self.image_processor(images=[image], return_tensors="pt") if self.framework == "pt": inputs = inputs.to(self.dtype) return inputs def postprocess(self, model_outputs): images = [] if "reconstruction" in model_outputs: outputs = model_outputs.reconstruction for output in outputs: output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy() output = np.moveaxis(output, source=0, destination=-1) output = (output * 255.0).round().astype(np.uint8) # float32 to uint8 images.append(Image.fromarray(output)) return images if len(images) > 1 else images[0]
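# ---------------------------------------------------------------------------
# Illustrative sketch: the tensor-to-PIL conversion performed in `postprocess`
# above, replayed on a random CHW tensor so the shape and dtype handling are
# easy to follow. The 3x8x8 "reconstruction" is hypothetical; torch, numpy and
# Pillow are dependencies this pipeline already assumes.
if __name__ == "__main__":
    import numpy as np
    import torch
    from PIL import Image

    fake_reconstruction = torch.rand(3, 8, 8)  # hypothetical model output, CHW in [0, 1]
    array = fake_reconstruction.squeeze().float().cpu().clamp_(0, 1).numpy()
    array = np.moveaxis(array, source=0, destination=-1)  # CHW -> HWC
    array = (array * 255.0).round().astype(np.uint8)  # float32 -> uint8
    image = Image.fromarray(array)
    print(image.size, image.mode)  # (8, 8) RGB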
transformers/src/transformers/pipelines/image_to_image.py/0
{ "file_path": "transformers/src/transformers/pipelines/image_to_image.py", "repo_id": "transformers", "token_count": 2059 }
533
import warnings from collections import UserDict from typing import Any, Union, overload from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES from ..tf_utils import stable_softmax logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class ZeroShotImageClassificationPipeline(Pipeline): """ Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when you provide an image and a set of `candidate_labels`. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="google/siglip-so400m-patch14-384") >>> classifier( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["animals", "humans", "landscape"], ... ) [{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}] >>> classifier( ... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", ... candidate_labels=["black and white", "photorealist", "painting"], ... ) [{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"zero-shot-image-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-image-classification). """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = True def __init__(self, **kwargs): super().__init__(**kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES ) @overload def __call__( self, image: Union[str, "Image.Image"], candidate_labels: list[str], **kwargs: Any ) -> list[dict[str, Any]]: ... @overload def __call__( self, image: Union[list[str], list["Image.Image"]], candidate_labels: list[str], **kwargs: Any ) -> list[list[dict[str, Any]]]: ... def __call__( self, image: Union[str, list[str], "Image.Image", list["Image.Image"]], candidate_labels: list[str], **kwargs: Any, ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]: """ Assign labels to the image(s) passed as inputs. Args: image (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly candidate_labels (`list[str]`): The candidate labels for this image. They will be formatted using *hypothesis_template*. 
hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}."`): The format used in conjunction with *candidate_labels* to attempt the image classification by replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are already formatted. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A list of dictionaries containing one entry per proposed label. Each dictionary contains the following keys: - **label** (`str`) -- One of the suggested *candidate_labels*. - **score** (`float`) -- The score attributed by the model to that label. It is a value between 0 and 1, computed as the `softmax` of `logits_per_image`. """ # Once the deprecation of the `images` keyword argument is complete, remove the default `None` value for `image` if "images" in kwargs: image = kwargs.pop("images") if image is None: raise ValueError("Cannot call the zero-shot-image-classification pipeline without an images argument!") return super().__call__(image, candidate_labels=candidate_labels, **kwargs) def _sanitize_parameters(self, tokenizer_kwargs=None, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] if tokenizer_kwargs is not None: warnings.warn( "The `tokenizer_kwargs` argument is deprecated and will be removed in version 5 of Transformers", FutureWarning, ) preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs return preprocess_params, {}, {} def preprocess( self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.", timeout=None, tokenizer_kwargs=None, ): if tokenizer_kwargs is None: tokenizer_kwargs = {} image = load_image(image, timeout=timeout) inputs = self.image_processor(images=[image], return_tensors=self.framework) if self.framework == "pt": inputs = inputs.to(self.dtype) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] tokenizer_default_kwargs = {"padding": True} if "siglip" in self.model.config.model_type: tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True) tokenizer_default_kwargs.update(tokenizer_kwargs) text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: # Batching case.
text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt" and "siglip" in self.model.config.model_type: probs = torch.sigmoid(logits).squeeze(-1) scores = probs.tolist() if not isinstance(scores, list): scores = [scores] elif self.framework == "pt": probs = logits.softmax(dim=-1).squeeze(-1) scores = probs.tolist() if not isinstance(scores, list): scores = [scores] elif self.framework == "tf": probs = stable_softmax(logits, axis=-1) scores = probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
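# ---------------------------------------------------------------------------
# Illustrative sketch: the two scoring branches in `postprocess` above.
# SigLIP-style models use a sigmoid, so label scores are independent and need
# not sum to 1; CLIP-style models use a softmax, so scores form a distribution
# over the candidate labels. The logits below are hypothetical.
if __name__ == "__main__":
    import torch

    logits = torch.tensor([2.0, 0.5, -1.0])  # hypothetical logits_per_image row
    siglip_scores = torch.sigmoid(logits)
    clip_scores = logits.softmax(dim=-1)
    print(siglip_scores.tolist(), "sum:", siglip_scores.sum().item())  # sum is unconstrained
    print(clip_scores.tolist(), "sum:", clip_scores.sum().item())  # sum is ~1.0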
transformers/src/transformers/pipelines/zero_shot_image_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/zero_shot_image_classification.py", "repo_id": "transformers", "token_count": 3624 }
534
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING, Any, Optional from .base import HfQuantizer if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import is_accelerate_available, is_fbgemm_gpu_available, is_torch_available, logging from .quantizers_utils import get_module_from_name if is_torch_available(): import torch logger = logging.get_logger(__name__) class FbgemmFp8HfQuantizer(HfQuantizer): """ FP8 quantization using fbgemm kernels """ requires_parameters_quantization = True requires_calibration = False required_packages = ["fbgemm-gpu", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) self.quantization_config = quantization_config def validate_environment(self, *args, **kwargs): if not is_torch_available(): raise ImportError( "Using fbgemm fp8 quantization requires torch >= 2.1.0. " "Please install the latest version of torch ( pip install --upgrade torch )" ) if not is_fbgemm_gpu_available(): raise ImportError( "Using fbgemm fp8 quantization requires the fbgemm-gpu library. " "Please install the latest version of fbgemm-gpu library by following : https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries" ) if not is_accelerate_available("0.32.2"): raise ImportError( "Loading an FP8 quantized model requires accelerate > 0.32.1 (`pip install --upgrade accelerate`)" ) if not torch.cuda.is_available(): raise RuntimeError("Using FP8 quantized models with fbgemm kernels requires a GPU") compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 9: raise ValueError( "FP8 quantized models are only supported on GPUs with compute capability >= 9.0 (e.g. H100)" ) device_map = kwargs.get("device_map") if device_map is None: logger.warning_once( "You have loaded an FP8 model on CPU and have a CUDA device available, make sure to set " "your model on a GPU device in order to run your model. To remove this warning, pass device_map = 'cuda'. " ) elif device_map is not None: if ( not self.pre_quantized and isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()) ): raise ValueError( "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device. " "This is not supported when the model is quantized on the fly. " "Please use a quantized checkpoint or remove the CPU or disk device from the device_map." ) def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: dtype = torch.bfloat16 logger.info( "Overriding dtype=%s with `dtype=torch.bfloat16` due to " "requirements of `fbgemm-gpu` to enable model loading in fp8. " "Pass your own dtype to specify the dtype of the remaining non-linear layers or pass" " dtype=torch.bfloat16 to remove this warning.", dtype, ) elif dtype == torch.float16: raise ValueError( "You cannot use FP8 with dtype=torch.float16. We recommend passing dtype=torch.bfloat16" ) return dtype def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ): from ..integrations import FbgemmFp8Linear, FbgemmFp8Llama4TextExperts module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, FbgemmFp8Linear): if self.pre_quantized or tensor_name == "bias": if tensor_name == "weight" and param_value.dtype != torch.float8_e4m3fn: raise ValueError("Expect quantized weights but got an unquantized weight") return False else: if tensor_name == "weight_scale": raise ValueError("Expect unquantized weights but got a quantized weight_scale") return True if isinstance(module, FbgemmFp8Llama4TextExperts): if self.pre_quantized or tensor_name == "bias": return False else: if tensor_name == "gate_up_proj_scale" or tensor_name == "down_proj_scale": raise ValueError("Expect unquantized weights but got a quantized weight_scale") return True return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: dict[str, Any], unexpected_keys: Optional[list[str]] = None, ): """ Quantizes weights into weight and weight_scale """ from ..integrations import FbgemmFp8Llama4TextExperts module, tensor_name = get_module_from_name(model, param_name) if isinstance(module, FbgemmFp8Llama4TextExperts): if tensor_name == "gate_up_proj": # Process each expert separately # Transpose the second and third dimension transposed_param = param_value.transpose(1, 2) # Reshape to 2D for quantization original_shape = transposed_param.shape flattened_param = transposed_param.reshape(-1, original_shape[-1]) # Quantize using per row instead of per column new_value_flat, weight_scale_flat = torch.ops.fbgemm.quantize_fp8_per_row(flattened_param) # Reshape back to original dimensions new_value = new_value_flat.reshape(original_shape) new_value = new_value.transpose(1, 2) weight_scale = weight_scale_flat.reshape(original_shape[0], 1, original_shape[1]) elif tensor_name == "down_proj": # Process each expert separately # Transpose the weights for proper quantization transposed_param = param_value.transpose(1, 2) # Reshape to 2D for quantization original_shape = transposed_param.shape flattened_param = transposed_param.reshape(-1, original_shape[-1]) # Quantize per row on the flattened 2D view new_value_flat, weight_scale_flat = torch.ops.fbgemm.quantize_fp8_per_row(flattened_param) # Reshape back to original dimensions new_value = new_value_flat.reshape(original_shape) new_value = new_value.transpose(1, 2) weight_scale = weight_scale_flat.reshape(original_shape[0], original_shape[1], 1) module._parameters[f"{tensor_name}_scale"] = torch.nn.Parameter(weight_scale.to(target_device)) else: new_value, weight_scale = torch.ops.fbgemm.quantize_fp8_per_row(param_value) module._parameters[f"{tensor_name}_scale"] = torch.nn.Parameter( weight_scale.view(weight_scale.shape[0], 1).to(target_device) ) module._parameters[tensor_name] = torch.nn.Parameter(new_value.to(target_device)) if unexpected_keys is not None and param_name in unexpected_keys: unexpected_keys.remove(param_name) del param_name def _process_model_after_weight_loading(self, model: "PreTrainedModel",
**kwargs): return model def _process_model_before_weight_loading( self, model: "PreTrainedModel", keep_in_fp32_modules: Optional[list[str]] = None, **kwargs, ): from ..integrations import replace_with_fbgemm_fp8_linear tp_plan = model._tp_plan self.modules_to_not_convert = self.get_modules_to_not_convert( model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules ) config = model.config model = replace_with_fbgemm_fp8_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config, pre_quantized=self.pre_quantized, config=config, tp_plan=tp_plan, ) model.config.quantization_config = self.quantization_config def update_missing_keys(self, model, missing_keys: list[str], prefix: str) -> list[str]: from ..integrations import FbgemmFp8Linear, FbgemmFp8Llama4TextExperts not_missing_keys = [] for name, module in model.named_modules(): if isinstance(module, (FbgemmFp8Linear, FbgemmFp8Llama4TextExperts)): for missing in missing_keys: if ( (name in missing or name in f"{prefix}.{missing}") and not missing.endswith(".weight") and not missing.endswith(".bias") ): not_missing_keys.append(missing) return [k for k in missing_keys if k not in not_missing_keys] def update_tp_plan(self, config): if "Llama4" in config.__class__.__name__: text_plan = { # We are using a different tp plan with local_colwise and local_rowwise for the attention because fbgemm operations cannot be parallelized # With local_colwise and local_rowwise, all the operations are done locally, and we add a gather operation to gather the results instead of # using dtensors "layers.*.self_attn.q_proj.weight": "local_colwise", "layers.*.self_attn.q_proj.weight_scale": "local_colwise", "layers.*.self_attn.k_proj.weight": "local_colwise", "layers.*.self_attn.k_proj.weight_scale": "local_colwise", "layers.*.self_attn.v_proj.weight": "local_colwise", "layers.*.self_attn.v_proj.weight_scale": "local_colwise", "layers.*.self_attn.o_proj.weight": "local_rowwise", "layers.*.self_attn": "gather", # We keep the same sequence_parallel plan for layernorms "layers.*.input_layernorm.weight": "sequence_parallel", "layers.*.post_attention_layernorm.weight": "sequence_parallel", "norm.weight": "sequence_parallel", # We keep the same local_colwise and local_rowwise plan for the feed forward shared expert # We also add scales for the shared expert, for local_colwise the scale is also local_colwise # For local_rowwise the scale is replicated, so we don't need to add it "layers.*.feed_forward.shared_expert.gate_proj.weight": "local_colwise", "layers.*.feed_forward.shared_expert.gate_proj.weight_scale": "local_colwise", "layers.*.feed_forward.shared_expert.up_proj.weight": "local_colwise", "layers.*.feed_forward.shared_expert.up_proj.weight_scale": "local_colwise", "layers.*.feed_forward.shared_expert.down_proj.weight": "local_rowwise", "layers.*.feed_forward.experts": "local", "layers.*.feed_forward": "gather", "layers.*.feed_forward.experts.*.gate_proj.weight": "local_colwise", "layers.*.feed_forward.experts.*.gate_proj.weight_scale": "local_colwise", "layers.*.feed_forward.experts.*.up_proj.weight": "local_colwise", "layers.*.feed_forward.experts.*.up_proj.weight_scale": "local_colwise", "layers.*.feed_forward.experts.*.down_proj.weight": "local_rowwise", # For Fused implementation we use local_packed_rowwise for the gate_up_proj, and the same for the packed scales # We use local_colwise for the down_proj, and the scales are replicated so we don't add them 
"layers.*.feed_forward.experts.gate_up_proj": "local_packed_rowwise", "layers.*.feed_forward.experts.gate_up_proj_scale": "local_packed_rowwise", "layers.*.feed_forward.experts.down_proj": "local_colwise", } if config.get_text_config() is not None: config.get_text_config().base_model_tp_plan = text_plan else: config.base_model_tp_plan = text_plan return config return config def is_serializable(self, safe_serialization=None): return True @property def is_trainable(self) -> bool: return False
transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_fbgemm_fp8.py", "repo_id": "transformers", "token_count": 6203 }
535
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging logger = logging.get_logger(__name__) # TODO: should be moved to `utils` after refactoring of SageMakerTrainer def is_sagemaker_model_parallel_available(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_mpi_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class SageMakerTrainingArguments(TrainingArguments): mp_parameters: str = field( default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, ) def __post_init__(self): super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead.", FutureWarning, ) @cached_property def _setup_devices(self) -> "torch.device": logger.info("PyTorch: setting up devices") if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`" ) if self.no_cuda: device = torch.device("cpu") self._n_gpu = 0 elif is_sagemaker_model_parallel_available(): local_rank = smp.local_rank() device = torch.device("cuda", local_rank) self._n_gpu = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta) self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK")) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing.
Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. self._n_gpu = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 if device.type == "cuda": torch.cuda.set_device(device) return device @property def world_size(self): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def place_model_on_device(self): return not is_sagemaker_model_parallel_available() @property def _no_sync_in_gradient_accumulation(self): return False
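# ---------------------------------------------------------------------------
# Illustrative sketch: the environment contract that
# `is_sagemaker_model_parallel_available` reads. The SageMaker launcher
# serializes its options as JSON strings; the values below are hypothetical,
# set only to drive the two JSON checks (the final check additionally requires
# the `smdistributed` package to be importable).
if __name__ == "__main__":
    import json
    import os

    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})  # hypothetical
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})  # hypothetical
    smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
    mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
    print("partitions" in smp_options)  # True -> first check passes
    print(mpi_options.get("sagemaker_mpi_enabled", False))  # True -> second check passes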
transformers/src/transformers/sagemaker/training_args_sm.py/0
{ "file_path": "transformers/src/transformers/sagemaker/training_args_sm.py", "repo_id": "transformers", "token_count": 2130 }
536
#!/usr/bin/env python # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import lru_cache from huggingface_hub import get_full_repo_name # for backward compatibility from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility from packaging import version from .. import __version__ from .auto_docstring import ( ClassAttrs, ClassDocstring, ImageProcessorArgs, ModelArgs, ModelOutputArgs, auto_class_docstring, auto_docstring, get_args_doc_from_source, parse_docstring, set_min_indent, ) from .backbone_utils import BackboneConfigMixin, BackboneMixin from .chat_template_utils import DocstringParsingException, TypeHintParsingException, get_json_schema from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, copy_func, replace_return_docstrings, ) from .generic import ( ContextManagers, ExplicitEnum, ModelOutput, PaddingStrategy, TensorType, TransformersKwargs, cached_property, can_return_loss, can_return_tuple, expand_dims, filter_out_non_signature_kwargs, find_labels, flatten_dict, infer_framework, is_jax_tensor, is_numpy_array, is_tensor, is_tf_symbolic_tensor, is_tf_tensor, is_timm_config_dict, is_timm_local_checkpoint, is_torch_device, is_torch_dtype, is_torch_tensor, reshape, squeeze, strtobool, tensor_size, to_numpy, to_py_obj, torch_float, torch_int, transpose, working_or_temp_dir, ) from .hub import ( CHAT_TEMPLATE_DIR, CHAT_TEMPLATE_FILE, CLOUDFRONT_DISTRIB_PREFIX, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, LEGACY_PROCESSOR_CHAT_TEMPLATE_FILE, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, EntryNotFoundError, PushInProgress, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, cached_file, default_cache_path, define_sagemaker_information, download_url, extract_commit_hash, has_file, http_user_agent, is_offline_mode, is_remote_url, list_repo_templates, send_example_telemetry, try_to_load_from_cache, ) from .import_utils import ( ACCELERATE_MIN_VERSION, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, GGUF_MIN_VERSION, TORCH_FX_REQUIRED_VERSION, TRITON_MIN_VERSION, USE_JAX, USE_TF, USE_TORCH, XLA_FSDPV2_MIN_VERSION, DummyObject, OptionalDependencyNotAvailable, _LazyModule, ccl_version, check_torch_load_is_safe, direct_transformers_import, get_torch_version, is_accelerate_available, is_apex_available, is_apollo_torch_available, is_aqlm_available, is_auto_awq_available, is_auto_gptq_available, is_auto_round_available, is_av_available, is_bitsandbytes_available, is_bitsandbytes_multi_backend_available, is_bs4_available, is_ccl_available, is_coloredlogs_available, is_compressed_tensors_available, is_cuda_platform, is_cv2_available, is_cython_available, 
is_datasets_available, is_decord_available, is_detectron2_available, is_eetq_available, is_essentia_available, is_faiss_available, is_fbgemm_gpu_available, is_flash_attn_2_available, is_flash_attn_3_available, is_flash_attn_greater_or_equal, is_flash_attn_greater_or_equal_2_10, is_flax_available, is_flute_available, is_fp_quant_available, is_fsdp_available, is_ftfy_available, is_g2p_en_available, is_galore_torch_available, is_gguf_available, is_gptqmodel_available, is_grokadamw_available, is_habana_gaudi1, is_hadamard_available, is_hqq_available, is_huggingface_hub_greater_or_equal, is_in_notebook, is_ipex_available, is_jieba_available, is_jinja_available, is_jumanpp_available, is_kenlm_available, is_keras_nlp_available, is_kernels_available, is_levenshtein_available, is_libcst_available, is_librosa_available, is_liger_kernel_available, is_lomo_available, is_matplotlib_available, is_mistral_common_available, is_mlx_available, is_natten_available, is_ninja_available, is_nltk_available, is_num2words_available, is_onnx_available, is_openai_available, is_optimum_available, is_optimum_quanto_available, is_pandas_available, is_peft_available, is_phonemizer_available, is_pretty_midi_available, is_protobuf_available, is_psutil_available, is_py3nvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytest_available, is_pytorch_quantization_available, is_quanto_greater, is_quark_available, is_qutlass_available, is_rich_available, is_rjieba_available, is_rocm_platform, is_sacremoses_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_schedulefree_available, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_available, is_spacy_available, is_speech_available, is_spqr_available, is_sudachi_available, is_sudachi_projection_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tf2onnx_available, is_tf_available, is_tiktoken_available, is_timm_available, is_tokenizers_available, is_torch_accelerator_available, is_torch_available, is_torch_bf16_available, is_torch_bf16_available_on_device, is_torch_bf16_cpu_available, is_torch_bf16_gpu_available, is_torch_compile_available, is_torch_cuda_available, is_torch_deterministic, is_torch_flex_attn_available, is_torch_fp16_available_on_device, is_torch_fx_available, is_torch_fx_proxy, is_torch_greater_or_equal, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_optimi_available, is_torch_sdpa_available, is_torch_tensorrt_fx_available, is_torch_tf32_available, is_torch_xla_available, is_torch_xpu_available, is_torchao_available, is_torchaudio_available, is_torchcodec_available, is_torchdistx_available, is_torchdynamo_available, is_torchdynamo_compiling, is_torchdynamo_exporting, is_torchvision_available, is_torchvision_v2_available, is_training_run_on_sagemaker, is_triton_available, is_uroman_available, is_vision_available, is_vptq_available, is_xlstm_available, is_yt_dlp_available, requires_backends, torch_only_method, ) from .peft_utils import ( ADAPTER_CONFIG_NAME, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, check_peft_version, find_adapter_config_file, ) WEIGHTS_NAME = "pytorch_model.bin" WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" TF2_WEIGHTS_NAME = "tf_model.h5" TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json" TF_WEIGHTS_NAME = "model.ckpt" FLAX_WEIGHTS_NAME = "flax_model.msgpack" FLAX_WEIGHTS_INDEX_NAME = 
"flax_model.msgpack.index.json" SAFE_WEIGHTS_NAME = "model.safetensors" SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" CONFIG_NAME = "config.json" FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" IMAGE_PROCESSOR_NAME = "preprocessor_config.json" VIDEO_PROCESSOR_NAME = "video_preprocessor_config.json" AUDIO_TOKENIZER_NAME = "audio_tokenizer_config.json" PROCESSOR_NAME = "processor_config.json" GENERATION_CONFIG_NAME = "generation_config.json" MODEL_CARD_NAME = "modelcard.json" SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility MULTIPLE_CHOICE_DUMMY_INPUTS = [ [[0, 1, 0, 1], [1, 0, 0, 1]] ] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too. DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] def check_min_version(min_version): if version.parse(__version__) < version.parse(min_version): if "dev" in min_version: error_message = ( "This example requires a source install from HuggingFace Transformers (see " "`https://huggingface.co/docs/transformers/installation#install-from-source`)," ) else: error_message = f"This example requires a minimum version of {min_version}," error_message += f" but the version found is {__version__}.\n" raise ImportError( error_message + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other " "versions of HuggingFace Transformers." ) @lru_cache def get_available_devices() -> frozenset[str]: """ Returns a frozenset of devices available for the current PyTorch installation. """ devices = {"cpu"} # `cpu` is always supported as a device in PyTorch if is_torch_cuda_available(): devices.add("cuda") if is_torch_mps_available(): devices.add("mps") if is_torch_xpu_available(): devices.add("xpu") if is_torch_npu_available(): devices.add("npu") if is_torch_hpu_available(): devices.add("hpu") if is_torch_mlu_available(): devices.add("mlu") if is_torch_musa_available(): devices.add("musa") return frozenset(devices)
transformers/src/transformers/utils/__init__.py/0
{ "file_path": "transformers/src/transformers/utils/__init__.py", "repo_id": "transformers", "token_count": 4581 }
537
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class AlbertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BarthezTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BartphoTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BertGenerationTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BigBirdTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CamembertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CodeLlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CpmTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class DebertaV2Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ErnieMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMProphetNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class FNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GemmaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GPTSw3Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LayoutXLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class M2M100Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MarianTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBart50Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MLukeTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["sentencepiece"]) class NllbTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PegasusTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PLBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ReformerTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class RemBertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SeamlessM4TTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SiglipTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class Speech2TextTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SpeechT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class T5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class UdopTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XGLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMRobertaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"])
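# ---------------------------------------------------------------------------
# Illustrative sketch: a self-contained miniature of the dummy-object pattern
# used above. The metaclass intercepts class-level attribute access (e.g.
# `from_pretrained`) and the constructor raises as well, so a missing backend
# fails loudly at first use. `MiniDummyObject` and `FakeTokenizer` are
# stand-ins, independent of the real `DummyObject` / `requires_backends`.
if __name__ == "__main__":
    class MiniDummyObject(type):
        def __getattribute__(cls, key):
            if key.startswith("_"):
                return super().__getattribute__(key)
            raise ImportError("This class requires the sentencepiece backend.")

    class FakeTokenizer(metaclass=MiniDummyObject):
        def __init__(self, *args, **kwargs):
            raise ImportError("This class requires the sentencepiece backend.")

    for attempt in (lambda: FakeTokenizer.from_pretrained, lambda: FakeTokenizer()):
        try:
            attempt()
        except ImportError as exc:
            print(exc)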
transformers/src/transformers/utils/dummy_sentencepiece_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_sentencepiece_objects.py", "repo_id": "transformers", "token_count": 2512 }
538
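The dummy classes above exist so that `from transformers import AlbertTokenizer` always succeeds even when the `sentencepiece` backend is missing: the `ImportError` is deferred until the class is actually used. Below is a minimal, self-contained sketch of how the `DummyObject` metaclass and `requires_backends` cooperate. This is an assumption-laden simplification: the real helpers live in `transformers.utils.import_utils` and consult a richer backend-availability table, while this version only probes `importlib`.

```python
import importlib.util


def requires_backends(obj, backends):
    """Raise a helpful ImportError if any required backend is not installed (simplified)."""
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the {', '.join(missing)} library but it was not found.")


class DummyObject(type):
    """Metaclass: any non-private attribute access on the class re-checks the backends."""

    def __getattribute__(cls, key):
        if key.startswith("_"):  # leave Python internals (including _backends) alone
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)
        return super().__getattribute__(key)


class AlbertTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


# Importing/defining the class is free; using it fails fast without sentencepiece:
# AlbertTokenizer.from_pretrained("albert-base-v2")  -> ImportError
# AlbertTokenizer()                                  -> ImportError
```

The design choice is deliberate: import-time failures would break `import transformers` entirely, whereas the dummy pattern reports the missing dependency only where it matters, with an actionable message.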
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from math import ceil def assert_device_map(device_map, num_blocks): blocks = list(range(0, num_blocks)) device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] # Duplicate check duplicate_blocks = [] for i in device_map_blocks: if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: duplicate_blocks.append(i) # Missing blocks missing_blocks = [i for i in blocks if i not in device_map_blocks] extra_blocks = [i for i in device_map_blocks if i not in blocks] if len(duplicate_blocks) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Each attention block must be assigned to exactly one" " device. These attention blocks were specified more than once: " + str(duplicate_blocks) ) if len(missing_blocks) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(missing_blocks) ) if len(extra_blocks) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map: " + str(extra_blocks) ) def get_device_map(n_layers, devices): """Returns a dictionary of layers distributed evenly across all devices.""" layers = list(range(n_layers)) n_blocks = int(ceil(n_layers / len(devices))) layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] return dict(zip(devices, layers_list))
transformers/src/transformers/utils/model_parallel_utils.py/0
{ "file_path": "transformers/src/transformers/utils/model_parallel_utils.py", "repo_id": "transformers", "token_count": 771 }
539
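For reference, `get_device_map` cuts the layer range into `ceil(n_layers / len(devices))`-sized chunks, so earlier devices fill up first and the last device takes the remainder; `assert_device_map` then checks the map is an exact partition of the blocks. A small usage sketch, assuming the module path shown in the row above is importable:

```python
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

# 10 layers over 3 devices -> ceil(10 / 3) == 4 layers per chunk
device_map = get_device_map(n_layers=10, devices=[0, 1, 2])
print(device_map)  # {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9]}

assert_device_map(device_map, num_blocks=10)  # exact partition: passes silently

# Dropping a layer trips the "missing blocks" check
try:
    assert_device_map({0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8]}, num_blocks=10)
except ValueError as err:
    print(err)  # message lists missing block 9
```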
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Adding a new model This page has been updated in light of the removal of the `add_new_model` script in favor of the more complete `add_new_model_like` script. We recommend you check out the documentation on [how to add a model](https://huggingface.co/docs/transformers/main/en/add_new_model) for complete and up-to-date instructions.
transformers/templates/adding_a_new_model/README.md/0
{ "file_path": "transformers/templates/adding_a_new_model/README.md", "repo_id": "transformers", "token_count": 253 }
540
{ "model_type": "roberta" }
transformers/tests/fixtures/dummy-config.json/0
{ "file_path": "transformers/tests/fixtures/dummy-config.json", "repo_id": "transformers", "token_count": 15 }
541
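This two-line fixture works because `model_type` is the only field `AutoConfig` needs to pick a config class; every other attribute falls back to that class's defaults. A hedged sketch of the lookup (the file must be named `config.json`, which is the name `from_pretrained` expects):

```python
import json
import tempfile
from pathlib import Path

from transformers import AutoConfig

with tempfile.TemporaryDirectory() as tmp_dir:
    (Path(tmp_dir) / "config.json").write_text(json.dumps({"model_type": "roberta"}))

    config = AutoConfig.from_pretrained(tmp_dir)
    print(type(config).__name__)  # RobertaConfig, resolved from model_type alone
    print(config.hidden_size)  # 768, a RobertaConfig default
```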
# Copyright 2022 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import logging import os import tempfile import unittest import warnings from huggingface_hub import HfFolder, create_pull_request from parameterized import parameterized from transformers import AutoConfig, GenerationConfig, WatermarkingConfig, is_torch_available from transformers import logging as transformers_logging if is_torch_available(): import torch from transformers.generation import ( ClassifierFreeGuidanceLogitsProcessor, EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GenerationMode, HammingDiversityLogitsProcessor, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, MinPLogitsWarper, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, WatermarkLogitsProcessor, ) from transformers.testing_utils import ( TOKEN, CaptureLogger, LoggingLevel, TemporaryHubRepo, is_staging_test, torch_device, ) class GenerationConfigTest(unittest.TestCase): @parameterized.expand([(None,), ("foo.json",)]) def test_save_load_config(self, config_name): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, config_name=config_name) loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.temperature, 0.7) self.assertEqual(loaded_config.length_penalty, 1.0) self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]]) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k, 50) self.assertEqual(loaded_config.max_length, 20) self.assertEqual(loaded_config.max_time, None) def test_from_model_config(self): model_config = AutoConfig.from_pretrained("openai-community/gpt2") generation_config_from_model = GenerationConfig.from_model_config(model_config) default_generation_config = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(generation_config_from_model, default_generation_config) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id) def test_update(self): generation_config = GenerationConfig() update_kwargs = { "max_new_tokens": 1024, "foo": "bar", }
update_kwargs_copy = copy.deepcopy(update_kwargs) unused_kwargs = generation_config.update(**update_kwargs) # update_kwargs was not modified (no side effects) self.assertEqual(update_kwargs, update_kwargs_copy) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens, 1024) # `.update()` returns a dictionary of unused kwargs self.assertEqual(unused_kwargs, {"foo": "bar"}) def test_kwarg_init(self): """Tests that we can overwrite attributes at `from_pretrained` time.""" default_config = GenerationConfig() self.assertEqual(default_config.temperature, 1.0) self.assertEqual(default_config.do_sample, False) self.assertEqual(default_config.num_beams, 1) config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) self.assertEqual(config.temperature, 0.7) self.assertEqual(config.do_sample, True) self.assertEqual(config.num_beams, 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0) self.assertEqual(loaded_config.temperature, 1.0) self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.num_beams, 1) # default value def test_validate(self): """ Tests that the `validate` method is working as expected. Note that `validate` is called at initialization time """ logger = transformers_logging.get_logger("transformers.generation.configuration_utils") # A correct configuration will not throw any warning logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: GenerationConfig() self.assertEqual(len(captured_logs.out), 0) # Inconsequent but technically wrong configuration will throw a warning (e.g. setting sampling # parameters with `do_sample=False`). May be escalated to an error in the future. logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: GenerationConfig(return_dict_in_generate=False, output_scores=True) self.assertNotEqual(len(captured_logs.out), 0) logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: generation_config_bad_temperature = GenerationConfig(do_sample=False, temperature=0.5) # store for later self.assertNotEqual(len(captured_logs.out), 0) # Expanding on the case above, we can update a bad configuration to get rid of the warning. Ideally, # that is done by unsetting the parameter (i.e. 
setting it to None) logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: # BAD - 0.9 means it is still set, we should warn generation_config_bad_temperature.update(temperature=0.9) self.assertNotEqual(len(captured_logs.out), 0) logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: # CORNER CASE - 1.0 is the default, we can't detect whether it is set by the user or not, we shouldn't warn generation_config_bad_temperature.update(temperature=1.0) self.assertEqual(len(captured_logs.out), 0) logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: # OK - None means it is unset, nothing to warn about generation_config_bad_temperature.update(temperature=None) self.assertEqual(len(captured_logs.out), 0) # Impossible sets of constraints/parameters will raise an exception with self.assertRaises(ValueError): GenerationConfig(do_sample=False, num_beams=1, num_return_sequences=2) with self.assertRaises(ValueError): # dummy constraint GenerationConfig(do_sample=True, num_beams=2, constraints=["dummy"]) with self.assertRaises(ValueError): GenerationConfig(do_sample=True, num_beams=2, force_words_ids=[[[1, 2, 3]]]) # Passing `generate()`-only flags to `validate` will raise an exception with self.assertRaises(ValueError): GenerationConfig(logits_processor="foo") # Model-specific parameters will NOT raise an exception or a warning logger.warning_once.cache_clear() with CaptureLogger(logger) as captured_logs: GenerationConfig(foo="bar") self.assertEqual(len(captured_logs.out), 0) # By default we throw a short warning. However, we log with INFO level the details. # Default: we don't log the incorrect input values, only a short summary. We explain how to get more details. logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as captured_logs: GenerationConfig(do_sample=False, temperature=0.5) self.assertNotIn("0.5", captured_logs.out) self.assertTrue(len(captured_logs.out) < 150) # short log self.assertIn("Set `TRANSFORMERS_VERBOSITY=info` for more details", captured_logs.out) # INFO level: we share the full deets logger.warning_once.cache_clear() logger.info_once.cache_clear() with LoggingLevel(logging.INFO): with CaptureLogger(logger) as captured_logs: GenerationConfig(do_sample=False, temperature=0.5) self.assertIn("0.5", captured_logs.out) self.assertTrue(len(captured_logs.out) > 400) # long log self.assertNotIn("Set `TRANSFORMERS_VERBOSITY=info` for more details", captured_logs.out) # Finally, we can set `strict=True` to raise an exception on what would otherwise be a warning. generation_config = GenerationConfig() generation_config.temperature = 0.5 generation_config.do_sample = False with self.assertRaises(ValueError): generation_config.validate(strict=True) def test_refuse_to_save(self): """Tests that we refuse to save a generation config that fails validation.""" # setting the temperature alone is invalid, as we also need to set do_sample to True -> throws a warning that # is caught, doesn't save, and raises an exception config = GenerationConfig() config.temperature = 0.5 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as exc: config.save_pretrained(tmp_dir) self.assertTrue("Fix these issues to save the configuration." 
in str(exc.exception)) self.assertTrue("`temperature` is set to `0.5`" in str(exc.exception)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # greedy decoding throws an exception if we try to return multiple sequences -> the exception is # caught, nothing is saved, and a ValueError is raised config = GenerationConfig() config.num_return_sequences = 2 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as exc: config.save_pretrained(tmp_dir) self.assertTrue("Fix these issues to save the configuration." in str(exc.exception)) self.assertTrue( "Greedy methods without beam search do not support `num_return_sequences` different than 1" in str(exc.exception) ) self.assertTrue(len(os.listdir(tmp_dir)) == 0) # Final check: no logs at warning level/warnings/exceptions thrown if it is correct, and file is saved. config = GenerationConfig() with tempfile.TemporaryDirectory() as tmp_dir: # Catch warnings with warnings.catch_warnings(record=True) as captured_warnings: # Catch logs (up to WARNING level, the default level) with LoggingLevel(logging.WARNING): logger = transformers_logging.get_logger("transformers.generation.configuration_utils") with CaptureLogger(logger) as captured_logs: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 0) self.assertEqual(len(captured_logs.out), 0) self.assertEqual(len(os.listdir(tmp_dir)), 1) def test_generation_mode(self): """Tests that the `get_generation_mode` method is working as expected.""" config = GenerationConfig() self.assertEqual(config.get_generation_mode(), GenerationMode.GREEDY_SEARCH) config = GenerationConfig(do_sample=True) self.assertEqual(config.get_generation_mode(), GenerationMode.SAMPLE) config = GenerationConfig(num_beams=2) self.assertEqual(config.get_generation_mode(), GenerationMode.BEAM_SEARCH) config = GenerationConfig(top_k=10, do_sample=False, penalty_alpha=0.6) self.assertEqual(config.get_generation_mode(), GenerationMode.CONTRASTIVE_SEARCH) config = GenerationConfig() self.assertEqual(config.get_generation_mode(assistant_model="foo"), GenerationMode.ASSISTED_GENERATION) def test_static_cache_without_cache_config(self): """Regression test for #35026 -- static cache should work without a cache config.""" config = GenerationConfig(cache_implementation="static") self.assertEqual(config.cache_implementation, "static") self.assertEqual(config.cache_config, None) class GenerationConfigSerializationTest(unittest.TestCase): def test_serialize_generation_sequence_bias(self): """Tests that GenerationConfig is serialized and SequenceBiasLogitsProcessor is initialized with sequence_bias parameter""" generation_config = GenerationConfig() sequence_bias = [[[45, 67], -0.6], [[89], 1.2]] generation_config.sequence_bias = sequence_bias with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.sequence_bias, sequence_bias) expected_sequence_bias = {(45, 67): -0.6, (89,): 1.2} bias_logits_processor = SequenceBiasLogitsProcessor(new_config.sequence_bias) self.assertDictEqual(bias_logits_processor.sequence_bias, expected_sequence_bias) def test_serialize_generation_min_length_eos_token(self): """Tests that GenerationConfig is serialized and MinLengthLogitsProcessor is initialized with min_length and eos_token_id""" eos_token_id = 0 min_length = 10 generation_config = GenerationConfig(min_length=min_length, eos_token_id=eos_token_id) with
tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_length, min_length) self.assertEqual(new_config.eos_token_id, eos_token_id) min_dist_processor = MinLengthLogitsProcessor( min_length=new_config.min_length, eos_token_id=new_config.eos_token_id ) self.assertEqual(min_dist_processor.min_length, min_length) self.assertEqual(min_dist_processor.eos_token_id, eos_token_id) def test_serialize_generation_min_new_tokens(self): """Tests that GenerationConfig is serialized and MinNewTokensLengthLogitsProcessor is initialized with min_new_tokens""" eos_token_id = 0 min_new_tokens = 5 prompt_length_to_skip = 2 generation_config = GenerationConfig(min_new_tokens=min_new_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_new_tokens, min_new_tokens) min_new_tokens_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=prompt_length_to_skip, min_new_tokens=new_config.min_new_tokens, eos_token_id=eos_token_id, ) self.assertEqual(min_new_tokens_processor.min_new_tokens, min_new_tokens) def test_serialize_generation_temperature(self): """Tests that GenerationConfig is serialized and TemperatureLogitsWarper is initialized with temperature""" temperature = 2.0 generation_config = GenerationConfig(temperature=temperature, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.temperature, temperature) temperature_logits_warper = TemperatureLogitsWarper(temperature=new_config.temperature) self.assertEqual(temperature_logits_warper.temperature, temperature) def test_serialize_generation_repetition_penalty(self): """Tests that GenerationConfig is serialized and RepetitionPenaltyLogitsProcessor is initialized with repetition_penalty""" penalty = 2.0 generation_config = GenerationConfig(repetition_penalty=penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.repetition_penalty, penalty) rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=new_config.repetition_penalty) self.assertEqual(rep_penalty_proc.penalty, penalty) def test_serialize_generation_encoder_repetition_penalty(self): """Tests that GenerationConfig is serialized and EncoderRepetitionPenaltyLogitsProcessor is initialized with penalty and input_ids""" penalty = 2.0 input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) generation_config = GenerationConfig(encoder_repetition_penalty=penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.encoder_repetition_penalty, penalty) rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor( penalty=new_config.encoder_repetition_penalty, encoder_input_ids=input_ids ) self.assertEqual(rep_penalty_proc.penalty, 1 / penalty) torch.testing.assert_close(rep_penalty_proc.encoder_input_ids, input_ids) def test_serialize_generation_top_p(self): """Tests that GenerationConfig is serialized and TopPLogitsWarper is initialized 
with top_p""" top_p = 0.8 generation_config = GenerationConfig(top_p=top_p, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.top_p, top_p) rep_penalty_proc = TopPLogitsWarper(top_p=new_config.top_p) self.assertEqual(rep_penalty_proc.top_p, top_p) def test_serialize_generation_top_k(self): """Tests that GenerationConfig is serialized and TopKLogitsWarper is initialized with top_k""" top_k = 2 generation_config = GenerationConfig(top_k=top_k, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.top_k, top_k) top_k_logits_wrap = TopKLogitsWarper(top_k=new_config.top_k) self.assertEqual(top_k_logits_wrap.top_k, top_k) def test_serialize_generation_min_p(self): """Tests that GenerationConfig is serialized and MinPLogitsWarper is initialized with min_p""" min_p = 0.8 generation_config = GenerationConfig(min_p=min_p, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.min_p, min_p) min_k_logits_wrap = MinPLogitsWarper(min_p=new_config.min_p) self.assertEqual(min_k_logits_wrap.min_p, min_p) def test_serialize_generation_typical_p(self): """Tests that GenerationConfig is serialized and TypicalLogitsWarper is initialized with mass""" mass = 0.8 generation_config = GenerationConfig(typical_p=mass, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.typical_p, mass) typical_p_logits_wrap = TypicalLogitsWarper(mass=new_config.typical_p) self.assertEqual(typical_p_logits_wrap.mass, mass) def test_serialize_generation_epsilon_cutoff(self): """Tests that GenerationConfig is serialized and EpsilonLogitsWarper is initialized with epsilon""" epsilon = 0.8 generation_config = GenerationConfig(epsilon_cutoff=epsilon, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.epsilon_cutoff, epsilon) epsilon_logits_wrap = EpsilonLogitsWarper(epsilon=new_config.epsilon_cutoff) self.assertEqual(epsilon_logits_wrap.epsilon, epsilon) def test_serialize_generation_eta_cutoff(self): """Tests that GenerationConfig is serialized and EtaLogitsWarper is initialized with epsilon""" epsilon = 0.8 generation_config = GenerationConfig(eta_cutoff=epsilon, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.eta_cutoff, epsilon) eta_logits_wrap = EtaLogitsWarper(epsilon=new_config.eta_cutoff) self.assertEqual(eta_logits_wrap.epsilon, epsilon) def test_serialize_generation_ngram_size(self): """Tests that GenerationConfig is serialized and NoRepeatNGramLogitsProcessor is initialized with ngram_size""" ngram_size = 2 generation_config = GenerationConfig(no_repeat_ngram_size=ngram_size, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: 
generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.no_repeat_ngram_size, ngram_size) no_repeat_ngram_proc = NoRepeatNGramLogitsProcessor(ngram_size=new_config.no_repeat_ngram_size) self.assertEqual(no_repeat_ngram_proc.ngram_size, ngram_size) def test_serialize_generation_encoder_ngram_size(self): """Tests that GenerationConfig is serialized and EncoderNoRepeatNGramLogitsProcessor is initialized with ngram_size""" ngram_size = 2 input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) generation_config = GenerationConfig(encoder_no_repeat_ngram_size=ngram_size, do_sample=True) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.encoder_no_repeat_ngram_size, ngram_size) encoder_no_repeat_ngram_proc = EncoderNoRepeatNGramLogitsProcessor( encoder_ngram_size=new_config.encoder_no_repeat_ngram_size, encoder_input_ids=input_ids ) self.assertEqual(encoder_no_repeat_ngram_proc.ngram_size, ngram_size) def test_serialize_generation_bad_words_ids(self): """Tests that GenerationConfig is serialized and NoBadWordsLogitsProcessor is initialized with bad_words_ids""" bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] generation_config = GenerationConfig(bad_words_ids=bad_word_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.bad_words_ids, bad_word_tokens) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=new_config.bad_words_ids) self.assertSequenceEqual(no_bad_words_dist_proc.bad_word_ids, bad_word_tokens) def test_serialize_generation_num_beams(self): """Tests that GenerationConfig is serialized and PrefixConstrainedLogitsProcessor is initialized with num_beams""" num_beams = 1 def prefix_allowed_tokens_fn(batch_id, inputs_ids): return [[0, 1], [2, 3]][batch_id] generation_config = GenerationConfig(num_beams=num_beams) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.num_beams, num_beams) prefix_constrained_logits_proc = PrefixConstrainedLogitsProcessor( prefix_allowed_tokens_fn, num_beams=new_config.num_beams ) self.assertEqual(prefix_constrained_logits_proc._num_beams, num_beams) def test_serialize_generation_diversity_penalty_and_num_beam_groups(self): """Tests that GenerationConfig is serialized and HammingDiversityLogitsProcessor is initialized with diversity_penalty and num_beam_groups""" num_beams = 2 num_beam_groups = 2 diversity_penalty = 1.0 generation_config = GenerationConfig( num_beams=num_beams, diversity_penalty=diversity_penalty, num_beam_groups=num_beam_groups ) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.num_beams, num_beams) self.assertEqual(new_config.diversity_penalty, diversity_penalty) self.assertEqual(new_config.num_beam_groups, num_beam_groups) diversity_logits_processor = HammingDiversityLogitsProcessor( diversity_penalty=new_config.diversity_penalty, num_beams=new_config.num_beams, num_beam_groups=new_config.num_beam_groups, )
self.assertEqual(diversity_logits_processor._num_beams, num_beams) self.assertEqual(diversity_logits_processor._diversity_penalty, diversity_penalty) self.assertEqual(diversity_logits_processor._num_sub_beams, num_beams // num_beam_groups) def test_serialize_generation_bos_token_id(self): """Tests that GenerationConfig is serialized and ForcedBOSTokenLogitsProcessor is initialized with bos_token_id""" bos_token_id = 0 generation_config = GenerationConfig(bos_token_id=bos_token_id) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.bos_token_id, bos_token_id) logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=new_config.bos_token_id) self.assertEqual(logits_processor.bos_token_id, bos_token_id) def test_serialize_generation_eos_token_id(self): """Tests that GenerationConfig is serialized and ForcedEOSTokenLogitsProcessor is initialized with eos_token_id""" eos_token_id = 0 max_length = 5 generation_config = GenerationConfig(eos_token_id=eos_token_id) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.eos_token_id, eos_token_id) logits_processor = ForcedEOSTokenLogitsProcessor( max_length=max_length, eos_token_id=new_config.eos_token_id, device=torch_device ) self.assertEqual(logits_processor.eos_token_id, eos_token_id) def test_serialize_generation_exponential_decay_length_penalty(self): """Tests that GenerationConfig is serialized and ExponentialDecayLengthPenalty is initialized with regulation_start and regulation_factor""" eos_token_id = 0 penalty_start = 5 penalty_factor = 1.1 input_ids_seq_length = 10 exponential_decay_length_penalty = (penalty_start, penalty_factor) generation_config = GenerationConfig(exponential_decay_length_penalty=exponential_decay_length_penalty) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.exponential_decay_length_penalty, [penalty_start, penalty_factor]) exponential_decay_processor = ExponentialDecayLengthPenalty( exponential_decay_length_penalty=new_config.exponential_decay_length_penalty, eos_token_id=eos_token_id, input_ids_seq_length=input_ids_seq_length, ) self.assertEqual( exponential_decay_processor.regulation_start, exponential_decay_length_penalty[0] + input_ids_seq_length ) self.assertEqual(exponential_decay_processor.regulation_factor, exponential_decay_length_penalty[1]) def test_serialize_generation_begin_suppress_tokens(self): """Tests that GenerationConfig is serialized and SuppressTokensAtBeginLogitsProcessor is initialized with begin_suppress_token and begin_index""" begin_suppress_tokens = [220, 50256] begin_index = 0 generation_config = GenerationConfig(begin_suppress_tokens=begin_suppress_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.begin_suppress_tokens, begin_suppress_tokens) suppress_processor = SuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=new_config.begin_suppress_tokens, begin_index=begin_index ) self.assertSequenceEqual(suppress_processor.begin_suppress_tokens, begin_suppress_tokens) 
self.assertEqual(suppress_processor.begin_index, begin_index) def test_serialize_generation_suppress_tokens(self): """Tests that GenerationConfig is serialized and SuppressTokensLogitsProcessor is initialized with suppress_token""" suppress_tokens = [220, 50256] generation_config = GenerationConfig(suppress_tokens=suppress_tokens) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertSequenceEqual(new_config.suppress_tokens, suppress_tokens) suppress_processor = SuppressTokensLogitsProcessor(suppress_tokens=new_config.suppress_tokens) self.assertSequenceEqual(suppress_processor.suppress_tokens, suppress_tokens) def test_serialize_generation_guidance_scale(self): """Tests that GenerationConfig is serialized and ClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale""" guidance_scale = 2.0 generation_config = GenerationConfig(guidance_scale=guidance_scale) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.guidance_scale, guidance_scale) classifier_processor = ClassifierFreeGuidanceLogitsProcessor(guidance_scale=new_config.guidance_scale) self.assertEqual(classifier_processor.guidance_scale, guidance_scale) def test_serialize_generation_guidance_scale_unbatched(self): """Tests that GenerationConfig is serialized and UnbatchedClassifierFreeGuidanceLogitsProcessor is initialized with guidance_scale""" guidance_scale = 2.0 input_ids = torch.LongTensor([[0]]) generation_config = GenerationConfig(guidance_scale=guidance_scale) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.guidance_scale, guidance_scale) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(new_config.guidance_scale, {}, input_ids) self.assertEqual(cfg.guidance_scale, guidance_scale) def test_serialize_generation_watermarking_config(self): """Tests that GenerationConfig is serialized and WatermarkLogitsProcessor is initialized with WatermarkingConfig parameters""" vocab_size = 20 bias = 2.0 greenlist_ratio = 0.5 hashing_key = 10 seeding_scheme = "lefthash" context_width = 10 watermarking_config = WatermarkingConfig( bias=bias, greenlist_ratio=greenlist_ratio, hashing_key=hashing_key, seeding_scheme=seeding_scheme, context_width=context_width, ) generation_config = GenerationConfig(watermarking_config=watermarking_config) with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.watermarking_config.bias, bias) self.assertEqual(new_config.watermarking_config.greenlist_ratio, greenlist_ratio) self.assertEqual(new_config.watermarking_config.hashing_key, hashing_key) self.assertEqual(new_config.watermarking_config.seeding_scheme, seeding_scheme) self.assertEqual(new_config.watermarking_config.context_width, context_width) watermark = WatermarkLogitsProcessor( vocab_size=vocab_size, device=torch_device, greenlist_ratio=new_config.watermarking_config.greenlist_ratio, bias=new_config.watermarking_config.bias, hashing_key=new_config.watermarking_config.hashing_key, seeding_scheme=new_config.watermarking_config.seeding_scheme, 
context_width=new_config.watermarking_config.context_width, ) self.assertEqual(watermark.bias, bias) self.assertEqual(watermark.greenlist_size, int(vocab_size * greenlist_ratio)) self.assertEqual(watermark.hash_key, hashing_key) self.assertEqual(watermark.seeding_scheme, seeding_scheme) self.assertEqual(watermark.context_width, context_width) @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub(tmp_repo.repo_id, token=self._token) new_config = GenerationConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_config = GenerationConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub(tmp_repo.repo_id, token=self._token) new_config = GenerationConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_config = GenerationConfig.from_pretrained(tmp_repo.repo_id) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_on_pr_revision(self): with TemporaryHubRepo(token=self._token) as tmp_repo: # create a PR pr = create_pull_request(repo_id=tmp_repo.repo_id, title="Test PR", token=self._token) revision = f"refs/pr/{pr.num}" # push to PR ref config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub(tmp_repo.repo_id, token=self._token, revision=revision) # load from PR ref new_config = GenerationConfig.from_pretrained(tmp_repo.repo_id, revision=revision) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k))
transformers/tests/generation/test_configuration_utils.py/0
{ "file_path": "transformers/tests/generation/test_configuration_utils.py", "repo_id": "transformers", "token_count": 16134 }
542
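The serialization tests above all follow one idiom: construct a `GenerationConfig`, round-trip it through `save_pretrained`/`from_pretrained`, assert the field survived, then hand it to the matching logits processor. A condensed sketch of the round-trip half (values are illustrative):

```python
import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, top_k=2)

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)  # writes generation_config.json
    reloaded = GenerationConfig.from_pretrained(tmp_dir)

assert reloaded.temperature == 0.7 and reloaded.top_k == 2
assert reloaded.max_length == 20  # unspecified fields keep their defaults

# `.update()` applies known fields and hands back whatever it didn't recognize
unused = reloaded.update(max_new_tokens=64, foo="bar")
assert reloaded.max_new_tokens == 64 and unused == {"foo": "bar"}
```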
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class AlignProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = AlignProcessor def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) image_processor_map = { "do_resize": True, "size": 20, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) def get_image_processor(self, **kwargs): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = AlignProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, BertTokenizer) self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) 
self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor) self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor) def test_save_load_pretrained_additional_features(self): processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = AlignProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, BertTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc: self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, padding="max_length", max_length=64) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertSetEqual(set(inputs.keys()), {"input_ids", "token_type_ids", "attention_mask", "pixel_values"}) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/align/test_processing_align.py/0
{ "file_path": "transformers/tests/models/align/test_processing_align.py", "repo_id": "transformers", "token_count": 2935 }
543
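The processor tests build every fixture from scratch, which keeps them Hub-free: a toy WordPiece vocab on disk is enough to construct a working `BertTokenizer`, and `AlignProcessor` is then just a router that sends text to the tokenizer and images to the image processor before merging the outputs. A hedged sketch of the tokenizer half of that fixture (the image half would additionally need Pillow and an `EfficientNetImageProcessor` JSON, as in `setUp` above):

```python
import os
import tempfile

from transformers import BertTokenizer

# Line numbers double as token ids: [UNK]=0, [CLS]=1, [SEP]=2, [PAD]=3, ...
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "low", "##er", "new"]

with tempfile.TemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "vocab.txt"), "w", encoding="utf-8") as f:
        f.write("\n".join(vocab_tokens) + "\n")

    tokenizer = BertTokenizer.from_pretrained(tmp_dir)
    encoded = tokenizer("lower new", padding="max_length", max_length=8)
    print(encoded["input_ids"])  # [1, 5, 6, 7, 2, 3, 3, 3]: [CLS] low ##er new [SEP] [PAD]...
```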
# Copyright 2021 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ViTImageProcessor, ViTImageProcessorFast, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_torchvision, require_vision sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class AutoImageProcessorTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_image_processor_from_model_shortcut(self): config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32") self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_key(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_feature_extractor_key(self): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_new_filename(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = CLIPConfig() # Create a dummy config file with image_processor_type processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class":
"CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) # remove image_processor_type to make sure config.json alone is enough to load image processor locally config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict() config_dict.pop("image_processor_type") config = CLIPImageProcessor(**config_dict) # save in new folder model_config.save_pretrained(tmpdirname) config.save_pretrained(tmpdirname) config = AutoImageProcessor.from_pretrained(tmpdirname) # make sure private variable is not incorrectly saved dict_as_saved = json.loads(config.to_json_string()) self.assertTrue("_processor_class" not in dict_as_saved) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_file(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) config = AutoImageProcessor.from_pretrained(processor_tmpfile) self.assertIsInstance(config, CLIPImageProcessor) def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "clip-base is not a local folder and is not a valid model identifier" ): _ = AutoImageProcessor.from_pretrained("clip-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_image_processor_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model") @require_vision @require_torchvision def test_use_fast_selection(self): checkpoint = "hf-internal-testing/tiny-random-vit" # TODO: @yoni, change in v4.48 (when use_fast set to True by default) # Slow image processor is selected by default image_processor = AutoImageProcessor.from_pretrained(checkpoint) self.assertIsInstance(image_processor, ViTImageProcessor) # Fast image processor is selected when use_fast=True image_processor = AutoImageProcessor.from_pretrained(checkpoint, use_fast=True) self.assertIsInstance(image_processor, ViTImageProcessorFast) # Slow image processor is selected when use_fast=False image_processor = AutoImageProcessor.from_pretrained(checkpoint, use_fast=False) self.assertIsInstance(image_processor, ViTImageProcessor) def test_from_pretrained_dynamic_image_processor(self): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") # If remote code is disabled, we can't load this config. with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") # Test the dynamic module is loaded only once. 
reloaded_image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertIs(image_processor.__class__, reloaded_image_processor.__class__) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_processor.py"))) # Assert we saved custom code self.assertEqual( reloaded_image_processor.auto_map["AutoImageProcessor"], "image_processor.NewImageProcessor" ) self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor") # Test the dynamic module is reloaded if we force it. reloaded_image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True, force_download=True ) self.assertIsNot(image_processor.__class__, reloaded_image_processor.__class__) def test_new_image_processor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, CustomImageProcessor) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(ValueError): AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor) with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) image_processor = CustomImageProcessor.from_pretrained(tmpdirname) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir) self.assertIsInstance(new_image_processor, CustomImageProcessor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_image_processor_conflict(self): class NewImageProcessor(CLIPImageProcessor): is_local = True try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, NewImageProcessor) # If remote code is not set, the default is to use local image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. 
image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(not hasattr(image_processor, "is_local")) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
transformers/tests/models/auto/test_image_processing_auto.py/0
{ "file_path": "transformers/tests/models/auto/test_image_processing_auto.py", "repo_id": "transformers", "token_count": 5156 }
544
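The resolution order these tests pin down: `AutoImageProcessor.from_pretrained` reads `preprocessor_config.json` and dispatches on `image_processor_type`, falls back to a legacy `feature_extractor_type` key, and can consult the model's `config.json`; `trust_remote_code` and `register` cover dynamic and custom classes. A sketch of the plain local-directory path (assumes Pillow is installed, and that the slow class is still the default, per the `use_fast` TODO above):

```python
import json
import tempfile
from pathlib import Path

from transformers import AutoImageProcessor, CLIPImageProcessor

with tempfile.TemporaryDirectory() as tmp_dir:
    # The minimal pair of files the auto class needs to resolve a class locally
    (Path(tmp_dir) / "preprocessor_config.json").write_text(
        json.dumps({"image_processor_type": "CLIPImageProcessor"})
    )
    (Path(tmp_dir) / "config.json").write_text(json.dumps({"model_type": "clip"}))

    image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
    assert isinstance(image_processor, CLIPImageProcessor)
```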
# Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch BART model.""" import copy import tempfile import unittest import timeout_decorator # noqa from transformers import BartConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoModelForSequenceClassification, BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, BartTokenizer, pipeline, ) from transformers.models.bart.modeling_bart import BartDecoder, BartEncoder, shift_tokens_right def prepare_bart_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } class BartModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_bart_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return BartConfig( vocab_size=self.vocab_size,
d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 config.vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BartModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = BartModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = BartEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = BartDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class BartHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 
46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size def test_sequence_classification_forward(self): config, input_ids, batch_size = self._get_config_and_data() labels = _long_tensor([2] * batch_size).to(torch_device) model = BartForSequenceClassification(config) model.to(torch_device) outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels) expected_shape = torch.Size((batch_size, config.num_labels)) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_question_answering_forward(self): config, input_ids, batch_size = self._get_config_and_data() sequence_labels = ids_tensor([batch_size], 2).to(torch_device) model = BartForQuestionAnswering(config) model.to(torch_device) outputs = model( input_ids=input_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.assertEqual(outputs["start_logits"].shape, input_ids.shape) self.assertEqual(outputs["end_logits"].shape, input_ids.shape) self.assertIsInstance(outputs["loss"].item(), float) @timeout_decorator.timeout(1) def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device) lm_model = BartForConditionalGeneration(config) lm_model.to(torch_device) outputs = lm_model(input_ids=input_ids, labels=lm_labels) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) self.assertIsInstance(outputs["loss"].item(), float) def test_lm_uneven_forward(self): config = BartConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = BartForConditionalGeneration(config).to(torch_device) context = torch.tensor( [[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long ) summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long) outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long) config = BartConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) lm_model = BartForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 generated_ids = lm_model.generate( 
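            # with do_sample=True the exact ids are nondeterministic, so the assertion
            # below only checks the shape of the generated batch, not its contents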
input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) @slow def test_tokenization(self): tokenizer = BartTokenizer.from_pretrained("facebook/bart-large") examples = [" Hello world", " DomDramg"] # need leading spaces for equality fairseq_results = [ torch.tensor([0, 20920, 232, 2]), torch.tensor([0, 11349, 495, 4040, 571, 2]), ] for ex, desired_result in zip(examples, fairseq_results): bart_toks = tokenizer.encode(ex, return_tensors="pt").squeeze() assert_tensors_close(desired_result.long(), bart_toks, prefix=ex) @require_torch_fp16 def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = BartForConditionalGeneration(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = BartForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_resize_tokens_embeddings_more(self): config, input_ids, _ = self._get_config_and_data() def _get_embs(m): return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone()) model = BartForConditionalGeneration(config).eval().to(torch_device) input, output = _get_embs(model) self.assertTrue(torch.eq(input, output).all()) new_vocab_size = 45 model.resize_token_embeddings(new_vocab_size) input_new, output_new = _get_embs(model) self.assertEqual(input_new.shape, (new_vocab_size, config.d_model)) self.assertEqual(output_new.shape, (new_vocab_size, config.d_model)) self.assertTrue(torch.eq(input_new, output_new).all()) @require_torch class BartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (BartModel, BartForConditionalGeneration, BartForSequenceClassification, BartForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": BartModel, "fill-mask": BartForConditionalGeneration, "question-answering": BartForQuestionAnswering, "summarization": BartForConditionalGeneration, "text-classification": BartForSequenceClassification, "text-generation": BartForCausalLM, "text2text-generation": BartForConditionalGeneration, "translation": BartForConditionalGeneration, "zero-shot": BartForSequenceClassification, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False # Fix me Michael test_pruning = False def setUp(self): self.model_tester = BartModelTester(self) self.config_tester = ConfigTester(self, config_class=BartConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: 
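                # round-trip each model class through save_pretrained/from_pretrained;
                # output_loading_info exposes the loading report so we can assert
                # that no weights were reported missing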
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    # BartForSequenceClassification does not support inputs_embeds
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (BartModel, BartForConditionalGeneration, BartForQuestionAnswering):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = BartForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

    @unittest.skip(
        reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
    )
    def test_load_save_without_tied_weights(self):
        pass

    def test_resize_embeddings_persists_embeddings_type(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        config.scale_embedding = True
        model = BartForConditionalGeneration(config)
        old_type = type(model.model.decoder.embed_tokens)
        model.resize_token_embeddings(new_num_tokens=config.vocab_size)
        new_type = type(model.model.decoder.embed_tokens)
        self.assertIs(old_type, new_type)


def assert_tensors_close(a, b, atol=1e-12, prefix=""):
    """Raise an informative AssertionError if `a` and `b` differ in shape or values, or are not both tensors."""
    if a is None and b is None:
        return True
    try:
        if torch.allclose(a, b, atol=atol):
            return True
        raise ValueError("tensors are not close")
    except Exception:
        pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
        if a.numel() > 100:
            msg = f"tensor values are {pct_different:.1%} different."
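        # for small tensors, fall back to printing both values in full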
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @slow class FastIntegrationTests(unittest.TestCase): """These tests are useful for debugging since they operate on a model with 1 encoder layer and 1 decoder layer.""" @cached_property def tok(self): return BartTokenizer.from_pretrained("facebook/bart-large") @cached_property def xsum_1_1_model(self): return BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-xsum-1-1") def test_xsum_1_1_generation(self): hf = self.xsum_1_1_model tok = self.tok ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. 
"We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes." ) EXPECTED = ( "</s>" " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." "</s>" ) dct = tok(ARTICLE, return_tensors="pt") generated_ids = hf.generate(**dct, num_beams=4) result = tok.batch_decode(generated_ids)[0] assert EXPECTED == result def test_xsum_1_1_batch_generation(self): # test batch batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." 
In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. \"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. 
Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. 
Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff," " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.", ], return_tensors="pt", padding="longest", truncation=True, ) generated_ids = self.xsum_1_1_model.generate(**batch, num_beams=4) result = self.tok.batch_decode(generated_ids) assert result[0] == ( "</s>" " The International Criminal Court (ICC) has announced that it has been announced by the International" " Criminal court." "</s><pad><pad><pad><pad><pad>" ) assert result[1] == ( "</s>" " An investigation into the crash that killed at least 10 people in the French capital has been" " released by the French police investigating the crash." "</s>" ) def test_encoder_equiv(self): # test batch batch = self.tok( [ "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories." " The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is" " based. The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted" ' its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including' ' East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination' " into the situation in Palestinian territories, paving the way for possible war crimes investigations" " against Israelis. As members of the court, Palestinians may be subject to counter-charges as well." " Israel and the United States, neither of which is an ICC member, opposed the Palestinians' efforts" " to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony," ' said it was a move toward greater justice. 
"As Palestine formally becomes a State Party to the Rome' ' Statute today, the world is also a step closer to ending a long era of impunity and injustice," he' ' said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of' ' justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was' ' just the first step for the Palestinians. "As the Rome Statute today enters into force for the State' " of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a" ' State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she' ' said. Rights group Human Rights Watch welcomed the development. "Governments seeking to penalize' " Palestine for joining the ICC should immediately end their pressure, and countries that support" " universal acceptance of the court's treaty should speak out to welcome its membership,\" said" " Balkees Jarrah, international justice counsel for the group. \"What's objectionable is the attempts" " to undermine international justice, not Palestine's decision to join a treaty to which over 100" ' countries around the world are members." In January, when the preliminary ICC examination was' " opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was" ' overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s' ' decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we' ' do not believe that it is eligible to join the ICC," the State Department said in a statement. It' ' urged the warring sides to resolve their differences through direct negotiations. "We will continue' ' to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said.' " But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows' " the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor" ' Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality."' " The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The" " inquiry will include alleged war crimes committed since June. The International Criminal Court was" " set up in 2002 to prosecute genocide, crimes against humanity and war crimes.", "The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted" " Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor" ' Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A' " person who has such a video needs to immediately give it to the investigators.\" Robin's comments" " follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the" " French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was" " recovered from a phone at the wreckage site. The two publications described the supposed video, but" " did not post it on their websites. The publications said that they watched the video, which was" " found by a source close to the investigation. 
\"One can hear cries of 'My God' in several" ' languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps' " of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy" ' shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing' " scene,\" said Julian Reichelt, editor-in-chief of Bild online. An official with France's accident" " investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc" " Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the" ' Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell' ' phones have been collected at the site, he said, but that they "hadn\'t been exploited yet."' " Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute" " in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working" " hand-in-hand with investigators. But none of the cell phones found so far have been sent to the" " institute, Menichini said. Asked whether staff involved in the search could have leaked a memory" ' card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett:' ' Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are' ' "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is' " something we did not know before. ... Overall we can say many things of the investigation weren't" ' revealed by the investigation at the beginning," he said. What was mental state of Germanwings' " co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled" " depression years before he took the controls of Germanwings Flight 9525, which he's accused of" " deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school" ' in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email' " correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa" " said, included medical documents he submitted in connection with resuming his flight training. The" " announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz's battle" " with depression, allowed him to continue training and ultimately put him in the cockpit. Lufthansa," " whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday" ' as a "swift and seamless clarification" and said it was sharing the information and documents --' " including training and medical records -- with public prosecutors. Spohr traveled to the crash site" " Wednesday, where recovery teams have been working for the past week to recover human remains and" " plane debris scattered across a steep mountainside. He saw the crisis center set up in" " Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving" " families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no" " visible human remains were left at the site but recovery teams would keep searching. 
French" " President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the" " victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini" " said. Among those personal belongings could be more cell phones belonging to the 144 passengers and" " six crew on board. Check out the latest from our correspondents . The details about Lubitz's" " correspondence with the flight school during his training were among several developments as" " investigators continued to delve into what caused the crash and Lubitz's possible motive for" " downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical" ' certificate, had passed all his examinations and "held all the licenses required." Earlier, a' " spokesman for the prosecutor's office in Dusseldorf, Christoph Kumpa, said medical records reveal" " Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent" " psychotherapy before he got his pilot's license. Kumpa emphasized there's no evidence suggesting" " Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether" " Lubitz feared his medical condition would cause him to lose his pilot's license, a European" ' government official briefed on the investigation told CNN on Tuesday. While flying was "a big part' " of his life,\" the source said, it's only one theory being considered. Another source, a law" " enforcement official briefed on the investigation, also told CNN that authorities believe the" " primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly" " because of his medical problems. Lubitz's girlfriend told investigators he had seen an eye doctor" " and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had" " psychological issues, the European government official said. But no matter what details emerge about" " his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the' " fact that maybe they weren't going to keep doing their job and they're upset about that and so" ' they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels' " entitled to also take that rage and turn it outward on 149 other people who had nothing to do with" " the person's problems.\" Germanwings crash compensation: What we know . Who was the captain of" " Germanwings Flight 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from" " Dusseldorf, while Laura Smith-Spark wrote from London. 
CNN's Frederik Pleitgen, Pamela Boykoff,"
                " Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.",
            ],
            return_tensors="pt",
            padding="longest",
            truncation=True,
        )
        features = self.xsum_1_1_model.get_encoder()(**batch).last_hidden_state
        expected = [[-0.0828, -0.0251, -0.0674], [0.1277, 0.3311, -0.0255], [0.2613, -0.0840, -0.2763]]
        assert_tensors_close(features[0, :3, :3], torch.tensor(expected), atol=1e-3)


@require_torch
@require_sentencepiece
@require_tokenizers
class BartModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return BartTokenizer.from_pretrained("facebook/bart-large")

    @slow
    def test_inference_no_head(self):
        model = BartModel.from_pretrained("facebook/bart-large").to(torch_device)
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = input_ids.ne(model.config.pad_token_id)
        with torch.no_grad():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]]], device=torch_device
        )
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)

    @slow
    def test_base_mask_filling(self):
        pbase = pipeline(task="fill-mask", model="facebook/bart-base")
        src_text = [" I went to the <mask>."]
        results = [x["token_str"] for x in pbase(src_text)]
        assert " bathroom" in results

    @slow
    def test_large_mask_filling(self):
        plarge = pipeline(task="fill-mask", model="facebook/bart-large")
        src_text = [" I went to the <mask>."]
        results = [x["token_str"] for x in plarge(src_text)]
        expected_results = [" bathroom", " gym", " wrong", " movies", " hospital"]
        self.assertListEqual(results, expected_results)

    @slow
    def test_mnli_inference(self):
        example_b = [0, 31414, 232, 328, 740, 1140, 69, 46078, 1588, 2, 1]
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], example_b])

        model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli").to(
            torch_device
        )  # eval() is called inside from_pretrained
        attention_mask = input_ids.ne(model.config.pad_token_id)
        # Test that model hasn't changed
        with torch.no_grad():
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)

        batched_logits = outputs.logits
        expected_shape = torch.Size((2, 3))
        self.assertEqual(batched_logits.shape, expected_shape)
        expected_slice = torch.tensor([[0.1907, 1.4342, -1.0289]], device=torch_device)
        logits_arr = batched_logits[0].detach()

        # Test that padding does not change results
        input_ids_no_pad = _long_tensor([example_b[:-1]])
        attention_mask_no_pad = input_ids_no_pad.ne(model.config.pad_token_id)

        with torch.no_grad():
            logits2 = model(input_ids=input_ids_no_pad, attention_mask=attention_mask_no_pad).logits.squeeze()
        assert_tensors_close(batched_logits[1], logits2, atol=1e-3)
        assert_tensors_close(expected_slice, logits_arr, atol=1e-3)

    @slow
    def test_xsum_summarization_same_as_fairseq(self):
        model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-xsum").to(torch_device)
        tok = self.default_tokenizer

        PGE_ARTICLE = """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""

        EXPECTED_SUMMARY = (
            "</s>"
            "California's largest power company has begun shutting off electricity to thousands of customers in the"
            " state."
            "</s>"
        )
        dct = tok.batch_encode_plus(
            [PGE_ARTICLE],
            max_length=1024,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        ).to(torch_device)

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"],
            attention_mask=dct["attention_mask"],
            num_beams=2,
            max_length=62,
            min_length=11,
            length_penalty=1.0,
            no_repeat_ngram_size=3,
            early_stopping=True,
            decoder_start_token_id=model.config.eos_token_id,
        )

        decoded = tok.batch_decode(hypotheses_batch)
        self.assertEqual(EXPECTED_SUMMARY, decoded[0])

    def test_xsum_config_generation_params(self):
        config = BartConfig.from_pretrained("facebook/bart-large-xsum")
        expected_params = {"num_beams": 6, "do_sample": False, "early_stopping": True, "length_penalty": 1.0}
        config_params = {k: getattr(config, k, "MISSING") for k in expected_params}
        self.assertDictEqual(expected_params, config_params)

    @slow
    def test_cnn_summarization_same_as_fairseq(self):
        hf = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
        tok = BartTokenizer.from_pretrained("facebook/bart-large")

        FRANCE_ARTICLE = (  # noqa
            " Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings"
            " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane."
            ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."'
            ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s'
            " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video"
            " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French"
            " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a"
            " phone at the wreckage site. The two publications described the supposed video, but did not post it on"
            " their websites. The publications said that they watched the video, which was found by a source close to"
            " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported."
            ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the'
            " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the"
            ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,'
            " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said"
            " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman"
            " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the"
            ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,'
            ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be'
            " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by"
            " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so"
            " far have been sent to the institute, Menichini said. 
Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. 
Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) # The below article tests that we don't add any hypotheses outside of the top n_beams IRAN_ARTICLE = ( " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. 
It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. 
Those judgments should be"
            " fact-based, not based on questionable assertions or dubious assumptions."
        )

        ARTICLE_SUBWAY = (
            " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
            " year later, she got married again in Westchester County, but to a different man and without divorcing"
            " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
            ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
            " once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
            ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
            ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
            " license application, according to court documents. Prosecutors said the marriages were part of an"
            " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
            " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
            " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
            " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
            " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
            " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
            " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
            " said the immigration scam involved some of her husbands, who filed for permanent residence status"
            " shortly after the marriages. Any divorces happened only after such filings were approved. It was"
            " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
            " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
            ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
            " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
            " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces"
            " up to four years in prison. Her next court appearance is scheduled for May 18."
        )

        dct = tok.batch_encode_plus(
            [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY],
            max_length=1024,
            padding="max_length",
            truncation="only_first",
            return_tensors="pt",
        )
        self.assertEqual(1024, dct["input_ids"].shape[1])

        hypotheses_batch = hf.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=2,
        )
        assert hypotheses_batch[:, 1].eq(0).all().item()

        EXPECTED = [
            "</s><s>"
            "A French prosecutor says he is not aware of any video footage from on board the plane. Two German "
            "magazines claim to have found a cell phone video showing the crash. The publications say they watched "
            "the video, which was found by a source close to the investigation. All 150 on board Germanwings Flight "
            "9525 were killed."
            "</s>",
            "</s><s>"
            "Palestinian Authority becomes 123rd member of the International Criminal Court. The move gives the court "
            "jurisdiction over alleged crimes in Palestinian territories. Israel and the United States opposed the "
            "Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said it was a "
            "move toward greater justice."
            "</s><pad><pad><pad><pad>",
            "</s><s>"
            "U.S. and its negotiating partners reached a strong framework agreement with Iran. Peter Bergen: The "
            "debate that has already begun will likely result in more heat than light. He says critics have made "
            "dubious assumptions and doubtful assertions. Bergen says the goal was to block Iran from building a "
            "nuclear weapon."
            "</s><pad><pad><pad>",
            "</s><s>"
            "Liana Barrientos, 39, has been married 10 times, sometimes within two weeks of each other. Prosecutors "
            "say the marriages were part of an immigration scam. She pleaded not guilty at State Supreme Court in the "
            "Bronx on Friday. If convicted, she faces up to four years in prison."
            "</s><pad><pad><pad><pad><pad>",
        ]

        generated_summaries = tok.batch_decode(hypotheses_batch.tolist())
        assert generated_summaries == EXPECTED

    @slow
    def test_contrastive_search_bart(self):
        article = (
            " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
            " year later, she got married again in Westchester County, but to a different man and without divorcing"
            " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
            ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
            " once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
            ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
            ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
            " license application, according to court documents. Prosecutors said the marriages were part of an"
            " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
            " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
            " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
            " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
            " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
            " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
            " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
            " said the immigration scam involved some of her husbands, who filed for permanent residence status"
            " shortly after the marriages. Any divorces happened only after such filings were approved. It was"
            " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
            " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
            ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
            " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
            " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces"
            " up to four years in prison. Her next court appearance is scheduled for May 18."
        )

        bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
        bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
        input_ids = bart_tokenizer(
            article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt"
        ).input_ids.to(torch_device)

        outputs = bart_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64, num_beams=1)
        generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        self.assertListEqual(
            generated_text,
            [
                "Liana Barrientos, 39, pleaded not guilty to charges related to false marriage statements. "
                "Prosecutors say she married at least 10 times, sometimes within two weeks of each other. She is "
                "accused of being part of an immigration scam to get permanent residency. If convicted, she faces up "
                "to four years in"
            ],
        )

    @slow
    def test_decoder_attention_mask(self):
        model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0).to(
            torch_device
        )
        tokenizer = self.default_tokenizer
        sentence = "UN Chief Says There Is No <mask> in Syria"
        input_ids = tokenizer(sentence, return_tensors="pt").input_ids.to(torch_device)
        padding_size = 3
        decoder_input_ids = torch.tensor(
            [
                [model.config.decoder_start_token_id]
                + padding_size * [model.config.pad_token_id]
                + [model.config.bos_token_id]
            ],
            dtype=torch.long,
            device=torch_device,
        )
        decoder_attention_mask = torch.where(decoder_input_ids == model.config.pad_token_id, 0, 1).to(torch_device)
        generated_ids = model.generate(
            input_ids=input_ids,
            use_cache=False,
            max_new_tokens=20,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
        )
        generated_sentence = tokenizer.batch_decode(generated_ids)[0]
        expected_sentence = "</s><pad><pad><pad><s>UN Chief Says There Is No Plan B for Peace in Syria</s>"
        self.assertEqual(generated_sentence, expected_sentence)


class BartStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        max_position_embeddings=50,
        is_encoder_decoder=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.is_encoder_decoder = is_encoder_decoder
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = BartConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            encoder_layers=self.decoder_layers,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
            is_encoder_decoder=self.is_encoder_decoder,
            forced_eos_token_id=None,
        )

        return (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        ) = self.prepare_config_and_inputs()

        encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            lm_labels,
        )

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = BartDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def create_and_check_decoder_model_attention_mask_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        model = BartDecoder(config=config).to(torch_device).eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = input_ids.shape[-1] // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(
            next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True
        )["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class BartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BartDecoder, BartForCausalLM) if is_torch_available() else ()
    fx_compatible = True
    test_pruning = False
    is_encoder_decoder = False
    test_missing_keys = False

    def setUp(
        self,
    ):
        self.model_tester = BartStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=BartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_flex_attention_with_grads(self):
        return
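
# --- Editor's note: illustrative sketch, not part of the original test file ---
# The create_and_check_decoder_model_past helpers above all verify one invariant:
# feeding a single new token together with cached past_key_values must reproduce
# the output of a full forward pass over the whole sequence. A minimal standalone
# version of that check on a real checkpoint; the checkpoint name and tolerance
# are assumptions chosen for illustration, not values taken from the test suite.
#
# import torch
# from transformers import BartForCausalLM, BartTokenizer
#
#
# def demo_past_key_values_equivalence():
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     model = BartForCausalLM.from_pretrained("facebook/bart-base").eval()
#     input_ids = tokenizer("Paris is the capital of", return_tensors="pt").input_ids
#     with torch.no_grad():
#         # full forward pass over the complete sequence
#         full_logits = model(input_ids).logits[:, -1]
#         # incremental pass: cache the prefix, then feed only the final token
#         past = model(input_ids[:, :-1], use_cache=True).past_key_values
#         step_logits = model(input_ids[:, -1:], past_key_values=past).logits[:, -1]
#     assert torch.allclose(full_logits, step_logits, atol=1e-3)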
transformers/tests/models/bart/test_modeling_bart.py/0
{ "file_path": "transformers/tests/models/bart/test_modeling_bart.py", "repo_id": "transformers", "token_count": 35785 }
545
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import os
import pickle
import unittest
from functools import lru_cache

from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
    VOCAB_FILES_NAMES,
    BertJapaneseTokenizer,
    CharacterTokenizer,
    JumanppTokenizer,
    MecabTokenizer,
    SudachiTokenizer,
    WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi_projection

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible


@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "cl-tohoku/bert-base-japanese"
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]",
            "こんにちは", "こん", "にちは", "ばんは",
            "##こん", "##にちは", "##ばんは",
            "世界", "##世界", "、", "##、", "。", "##。",
            "アップルストア", "外国", "##人", "参政", "##権",
            "此れ", "は", "猫", "です",
        ]

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_full_tokenizer_with_mecab_kwargs(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="mecab", mecab_kwargs={"mecab_dic": "ipadic"}
        )

        text = "アップルストア"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["アップルストア"])

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            import unidic

            self.assertTrue(
                os.path.isdir(unidic.DICDIR),
                "The content of unidic was not downloaded. Run `python -m unidic download` before running this test case. Note that this requires 2.1GB on disk.",
            )
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi_projection
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi_projection
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi_projection
    def test_sudachi_full_tokenizer_with_sudachi_kwargs_split_mode_B(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_split_mode": "B"}
        )

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "##人", "参政", "##権"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_projection(self):
        tokenizer = SudachiTokenizer(
            sudachi_dict_type="core", sudachi_split_mode="A", sudachi_projection="normalized_nouns"
        )

        self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"])

    @require_sudachi_projection
    def test_sudachi_full_tokenizer_with_sudachi_kwargs_sudachi_projection(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="sudachi", sudachi_kwargs={"sudachi_projection": "normalized_nouns"}
        )

        self.assertListEqual(tokenizer.tokenize("これはねこです。"), ["此れ", "は", "猫", "です", "。"])

    @require_sudachi_projection
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "])  # fmt: skip

    @require_sudachi_projection
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "])  # fmt: skip

    @require_sudachi_projection
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])  # fmt: skip

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_full_tokenizer_with_jumanpp_kwargs_trim_whitespace(self):
        tokenizer = self.tokenizer_class(
            self.vocab_file, word_tokenizer_type="jumanpp", jumanpp_kwargs={"trim_whitespace": True}
        )

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]  # fmt: skip

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])  # fmt: skip

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])  # fmt: skip

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "cl-tohoku/bert-base-japanese"
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_tokenizer(cls, pretrained_name=None, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(cls.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])  # fmt: skip
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "google-bert/bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
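
# --- Editor's note: illustrative sketch, not part of the original test file ---
# The pickle round-trip tests above exist because tokenizers are routinely sent
# to worker processes (e.g. datasets.map with num_proc > 1), which pickles them;
# MeCab/Sudachi/Juman++ backends hold C objects that need custom pickling
# support. A minimal standalone version of the invariant being tested; apart
# from the checkpoint name, which mirrors the one used above, everything here is
# an assumption for illustration (it also requires the fugashi/ipadic extras).
#
# import pickle
# from transformers import AutoTokenizer
#
#
# def demo_tokenizer_pickle_roundtrip():
#     tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#     restored = pickle.loads(pickle.dumps(tokenizer))
#     text = "こんにちは、世界。"
#     assert tokenizer.tokenize(text) == restored.tokenize(text)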
transformers/tests/models/bert_japanese/test_tokenization_bert_japanese.py/0
{ "file_path": "transformers/tests/models/bert_japanese/test_tokenization_bert_japanese.py", "repo_id": "transformers", "token_count": 10552 }
546
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import unittest
from functools import lru_cache

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_jinja, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "bigscience/tokenizer"
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(cls.tmpdirname)

    @classmethod
    @use_cache_if_possible
    @lru_cache(maxsize=64)
    def get_rust_tokenizer(cls, pretrained_name=None, **kwargs):
        _kwargs = copy.deepcopy(cls.special_tokens_map)
        _kwargs.update(kwargs)
        kwargs = _kwargs
        pretrained_name = pretrained_name or cls.tmpdirname
        return BloomTokenizerFast.from_pretrained(pretrained_name, **kwargs)

    @unittest.skip(reason="This needs a slow tokenizer. Bloom does not have one!")
    def test_encode_decode_with_spaces(self):
        return

    def test_encodings_from_sample_data(self):
        """
        Assert that the created tokens are the same as the hard-coded ones
        """
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """
        Tests the tokenizer downloaded from here:
            - https://huggingface.co/bigscience/tokenizer/
        """
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("facebook/xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick one sample from the dataset
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    @require_jinja
    def test_tokenization_for_chat(self):
        tokenizer = self.get_rust_tokenizer()
        tokenizer.chat_template = "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}"
        test_chats = [
            [{"role": "system", "content": "You are a helpful chatbot."}, {"role": "user", "content": "Hello!"}],
            [
                {"role": "system", "content": "You are a helpful chatbot."},
                {"role": "user", "content": "Hello!"},
                {"role": "assistant", "content": "Nice to meet you."},
            ],
            [{"role": "assistant", "content": "Nice to meet you."}, {"role": "user", "content": "Hello!"}],
        ]
        tokenized_chats = [tokenizer.apply_chat_template(test_chat) for test_chat in test_chats]
        expected_tokens = [
            [5448, 1306, 267, 66799, 44799, 37143, 17, 2, 59414, 4, 2],
            [5448, 1306, 267, 66799, 44799, 37143, 17, 2, 59414, 4, 2, 229126, 427, 11890, 1152, 17, 2],
            [229126, 427, 11890, 1152, 17, 2, 59414, 4, 2],
        ]
        for tokenized_chat, expected_tokens in zip(tokenized_chats, expected_tokens):
            self.assertListEqual(tokenized_chat, expected_tokens)

    def test_add_prefix_space_fast(self):
        tokenizer_w_prefix = self.get_rust_tokenizer(add_prefix_space=True)
        tokenizer_wo_prefix = self.get_rust_tokenizer(add_prefix_space=False)
        tokens_w_prefix = tokenizer_w_prefix.tokenize("Hey")
        tokens_wo_prefix = tokenizer_wo_prefix.tokenize("Hey")
        self.assertNotEqual(tokens_w_prefix, tokens_wo_prefix)
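
# --- Editor's note: illustrative sketch, not part of the original test file ---
# apply_chat_template first renders the Jinja template to a plain string and then
# tokenizes it; passing tokenize=False exposes that intermediate string, which is
# a convenient way to debug templates like the one assigned in the chat test
# above. The chat and expected rendering below are assumptions for illustration;
# the eos_token for this tokenizer is "</s>", per the special_tokens_map above.
#
# from transformers import AutoTokenizer
#
#
# def demo_chat_template_rendering():
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/tokenizer")
#     tokenizer.chat_template = (
#         "{% for message in messages %}{{ message.content }}{{ eos_token }}{% endfor %}"
#     )
#     chat = [{"role": "user", "content": "Hello!"}]
#     rendered = tokenizer.apply_chat_template(chat, tokenize=False)
#     assert rendered == "Hello!</s>"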
transformers/tests/models/bloom/test_tokenization_bloom.py/0
{ "file_path": "transformers/tests/models/bloom/test_tokenization_bloom.py", "repo_id": "transformers", "token_count": 3399 }
547
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

from transformers.image_utils import PILImageResampling
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChameleonImageProcessor

    if is_torchvision_available():
        from transformers import ChameleonImageProcessorFast


class ChameleonImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=200,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[1.0, 1.0, 1.0],
        image_std=[1.0, 1.0, 1.0],
        do_convert_rgb=True,
        resample=PILImageResampling.BILINEAR,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
        self.resample = resample

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
            "resample": self.resample,
        }

    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.expected_output_image_shape
    def expected_output_image_shape(self, images):
        return self.num_channels, self.crop_size["height"], self.crop_size["width"]

    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs
    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        return prepare_image_inputs(
            batch_size=self.batch_size,
            num_channels=self.num_channels,
            min_resolution=self.min_resolution,
            max_resolution=self.max_resolution,
            equal_resolution=equal_resolution,
            numpify=numpify,
            torchify=torchify,
        )


@require_torch
@require_vision
class ChameleonImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = ChameleonImageProcessor if is_vision_available() else None
    fast_image_processing_class = ChameleonImageProcessorFast if is_torchvision_available() else None

    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.setUp with CLIP->Chameleon
    def setUp(self):
        super().setUp()
        self.image_processor_tester = ChameleonImageProcessingTester(self)

    @property
    # Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTest.image_processor_dict
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processing, "do_resize"))
            self.assertTrue(hasattr(image_processing, "size"))
            self.assertTrue(hasattr(image_processing, "do_center_crop"))
            self.assertTrue(hasattr(image_processing, "center_crop"))
            self.assertTrue(hasattr(image_processing, "do_normalize"))
            self.assertTrue(hasattr(image_processing, "image_mean"))
            self.assertTrue(hasattr(image_processing, "image_std"))
            self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class.from_dict(self.image_processor_dict)
            self.assertEqual(image_processor.size, {"shortest_edge": 18})
            self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

            image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
            self.assertEqual(image_processor.size, {"shortest_edge": 42})
            self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PIL images
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)
            for image in image_inputs:
                self.assertIsInstance(image, Image.Image)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_call_numpy(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random numpy tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
            for image in image_inputs:
                self.assertIsInstance(image, np.ndarray)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_call_pytorch(self):
        for image_processing_class in self.image_processor_list:
            # Initialize image_processing
            image_processing = image_processing_class(**self.image_processor_dict)
            # create random PyTorch tensors
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
            for image in image_inputs:
                self.assertIsInstance(image, torch.Tensor)

            # Test not batched input
            encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
            expected_output_image_shape = (1, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

            # Test batched
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

    def test_nested_input(self):
        for image_processing_class in self.image_processor_list:
            image_processing = image_processing_class(**self.image_processor_dict)
            image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True)

            # Test batched as a list of images
            encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 3, 18, 18)
            self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)

            # Test batched as a nested list of images, where each sublist is one batch
            image_inputs_nested = [image_inputs[:3], image_inputs[3:]]
            encoded_images_nested = image_processing(image_inputs_nested, return_tensors="pt").pixel_values
            expected_output_image_shape = (7, 3, 18, 18)
            self.assertEqual(tuple(encoded_images_nested.shape), expected_output_image_shape)

            # Image processor should return same pixel values, independently of input format
            self.assertTrue((encoded_images_nested == encoded_images).all())
transformers/tests/models/chameleon/test_image_processing_chameleon.py/0
{ "file_path": "transformers/tests/models/chameleon/test_image_processing_chameleon.py", "repo_id": "transformers", "token_count": 4102 }
548
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ConvBERT model."""

import unittest

from transformers import ConvBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        ConvBertForMaskedLM,
        ConvBertForMultipleChoice,
        ConvBertForQuestionAnswering,
        ConvBertForSequenceClassification,
        ConvBertForTokenClassification,
        ConvBertModel,
    )


class ConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = ConvBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = ConvBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = ConvBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = ConvBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = ConvBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = ConvBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvBertModel,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": ConvBertModel,
            "fill-mask": ConvBertForMaskedLM,
            "question-answering": ConvBertForQuestionAnswering,
            "text-classification": ConvBertForSequenceClassification,
            "token-classification": ConvBertForTokenClassification,
            "zero-shot": ConvBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "YituTech/conv-bert-base"
        model = ConvBertModel.from_pretrained(model_name)
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = 5

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                # Question Answering model returns start_logits and end_logits
                if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                    correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                if "past_key_values" in outputs:
                    correct_outlen += 1  # past_key_values have been returned

                self.assertEqual(out_len, correct_outlen)

                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is
not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], ) def test_model_for_input_embeds(self): batch_size = 2 seq_length = 10 inputs_embeds = torch.rand([batch_size, seq_length, 768], device=torch_device) config = self.model_tester.get_config() model = ConvBertModel(config=config) model.to(torch_device) model.eval() result = model(inputs_embeds=inputs_embeds) self.assertEqual(result.last_hidden_state.shape, (batch_size, seq_length, config.hidden_size)) def test_reducing_attention_heads(self): config, *inputs_dict = self.model_tester.prepare_config_and_inputs() config.head_ratio = 4 self.model_tester.create_and_check_for_masked_lm(config, *inputs_dict) @require_torch class ConvBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = ConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0864, -0.4898, -0.3677], [0.1434, -0.2952, -0.7640], [-0.0112, -0.4432, -0.5432]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/convbert/test_modeling_convbert.py/0
{ "file_path": "transformers/tests/models/convbert/test_modeling_convbert.py", "repo_id": "transformers", "token_count": 9006 }
549
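A minimal, self-contained sketch of the shape contract the ConvBERT tester above pins down, reusing the tester's own tiny hyperparameters (this is an illustration, not part of the recorded test file):

```python
import torch

from transformers import ConvBertConfig, ConvBertModel

# The same deliberately tiny hyperparameters ConvBertModelTester passes to get_config().
config = ConvBertConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
model = ConvBertModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)

# Matches create_and_check_model: (batch_size, seq_length, hidden_size).
assert outputs.last_hidden_state.shape == (13, 7, config.hidden_size)

# ConvBERT's head_ratio (default 2) halves the number of self-attention heads,
# which is why test_attention_outputs above checks `num_attention_heads / 2`.
assert config.num_attention_heads // config.head_ratio == 2
```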
# Copyright 2018 Salesforce and HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from functools import lru_cache from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "Salesforce/ctrl" tokenizer_class = CTRLTokenizer test_rust_tokenizer = False test_seq2seq = False @classmethod def setUpClass(cls): super().setUpClass() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""] cls.special_tokens_map = {"unk_token": "<unk>"} cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(cls.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(cls.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs): kwargs.update(cls.special_tokens_map) pretrained_name = pretrained_name or cls.tmpdirname return CTRLTokenizer.from_pretrained(pretrained_name, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "adapt react readapt apt" output_text = "adapt react readapt apt" return input_text, output_text def test_full_tokenizer(self): tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "adapt react readapt apt" bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
transformers/tests/models/ctrl/test_tokenization_ctrl.py/0
{ "file_path": "transformers/tests/models/ctrl/test_tokenization_ctrl.py", "repo_id": "transformers", "token_count": 1187 }
550
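The CTRL tokenizer test above builds a toy BPE vocabulary on disk; the following sketch replays that setup end to end, with the same vocab and merges, so the expected token split can be verified outside the test harness:

```python
import json
import os
import tempfile

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
    merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))

    tokenizer = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
    # "react" and "readapt" decompose via the merges; pieces that continue a
    # word carry the `@@` suffix, exactly as asserted in test_full_tokenizer.
    assert tokenizer.tokenize("adapt react readapt apt") == [
        "adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt"
    ]
```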
# Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest import numpy as np from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_torchvision, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor if is_torchvision_available(): from transformers import DetrImageProcessorFast class DetrImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to DetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DetrImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DetrImageProcessor if is_vision_available() else None fast_image_processing_class = DetrImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = DetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_pad")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) def test_should_raise_if_annotation_format_invalid(self): image_processor_dict = self.image_processor_tester.prepare_image_processor_dict() with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: detection_target = json.loads(f.read()) annotations = {"image_id": 39769, "annotations": detection_target} params = { "images": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "annotations": annotations, "return_tensors": "pt", } 
image_processor_params = {**image_processor_dict, **{"format": "_INVALID_FORMAT_"}} for image_processing_class in self.image_processor_list: image_processor = image_processing_class(**image_processor_params) with self.assertRaises(ValueError) as e: image_processor(**params) self.assertTrue(str(e.exception).startswith("_INVALID_FORMAT_ is not a valid AnnotationFormat")) def test_valid_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) params = {"image_id": 39769, "annotations": target} for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class.from_pretrained("facebook/detr-resnet-50") # legal encodings (single image) _ = image_processing(images=image, annotations=params, return_tensors="pt") _ = image_processing(images=image, annotations=[params], return_tensors="pt") # legal encodings (batch of one image) _ = image_processing(images=[image], annotations=params, return_tensors="pt") _ = image_processing(images=[image], annotations=[params], return_tensors="pt") # legal encoding (batch of more than one image) n = 5 _ = image_processing(images=[image] * n, annotations=[params] * n, return_tensors="pt") # example of an illegal encoding (missing the 'image_id' key) with self.assertRaises(ValueError) as e: image_processing(images=image, annotations={"annotations": target}, return_tensors="pt") self.assertTrue(str(e.exception).startswith("Invalid COCO detection annotations")) # example of an illegal encoding (unequal lengths of images and annotations) with self.assertRaises(ValueError) as e: image_processing(images=[image] * n, annotations=[params] * (n - 1), return_tensors="pt") self.assertTrue(str(e.exception) == "The number of images (5) and annotations (4) do not match.") @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class.from_pretrained("facebook/detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) torch.testing.assert_close(encoding["labels"][0]["area"], expected_area) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3) # verify image_id expected_image_id = torch.tensor([39769]) torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) 
torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels) # verify orig_size expected_orig_size = torch.tensor([480, 640]) torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size) # verify size expected_size = torch.tensor([800, 1066]) torch.testing.assert_close(encoding["labels"][0]["size"], expected_size) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class.from_pretrained("facebook/detr-resnet-50-panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) torch.testing.assert_close(encoding["pixel_values"][0, 0, 0, :3], expected_slice, rtol=1e-4, atol=1e-4) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) torch.testing.assert_close(encoding["labels"][0]["area"], expected_area) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) torch.testing.assert_close(encoding["labels"][0]["boxes"][0], expected_boxes_slice, rtol=1e-3, atol=1e-3) # verify image_id expected_image_id = torch.tensor([39769]) torch.testing.assert_close(encoding["labels"][0]["image_id"], expected_image_id) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) torch.testing.assert_close(encoding["labels"][0]["iscrowd"], expected_is_crowd) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) torch.testing.assert_close(encoding["labels"][0]["class_labels"], expected_class_labels) # verify masks expected_masks_sum = 822873 relative_error = torch.abs(encoding["labels"][0]["masks"].sum() - expected_masks_sum) / expected_masks_sum self.assertTrue(relative_error < 1e-3) # verify orig_size expected_orig_size = torch.tensor([480, 640]) torch.testing.assert_close(encoding["labels"][0]["orig_size"], expected_orig_size) # verify size expected_size = torch.tensor([800, 1066]) torch.testing.assert_close(encoding["labels"][0]["size"], expected_size) @slow def test_batched_coco_detection_annotations(self): image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) annotations_0 = {"image_id": 39769, "annotations": target} annotations_1 = {"image_id": 39769, "annotations": target} # Adjust the bounding boxes for the resized image w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in 
range(len(annotations_1["annotations"])): coords = annotations_1["annotations"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotations_1["annotations"][i]["bbox"] = new_bbox images = [image_0, image_1] annotations = [annotations_0, annotations_1] for image_processing_class in self.image_processor_list: image_processing = image_processing_class() encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, return_tensors="pt", # do_convert_annotations=True ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.6879, 0.4609, 0.0755, 0.3691], [0.2118, 0.3359, 0.2601, 0.1566], [0.5011, 0.5000, 0.9979, 1.0000], [0.5010, 0.5020, 0.9979, 0.9959], [0.3284, 0.5944, 0.5884, 0.8112], [0.8394, 0.5445, 0.3213, 0.9110], ] ) expected_boxes_1 = torch.tensor( [ [0.4130, 0.2765, 0.0453, 0.2215], [0.1272, 0.2016, 0.1561, 0.0940], [0.3757, 0.4933, 0.7488, 0.9865], [0.3759, 0.5002, 0.7492, 0.9955], [0.1971, 0.5456, 0.3532, 0.8646], [0.5790, 0.4115, 0.3430, 0.7161], ] ) torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T 
torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1) def test_batched_coco_panoptic_annotations(self): # prepare image, target and masks_path image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotation_1["segments_info"])): coords = annotation_1["segments_info"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotation_1["segments_info"][i]["bbox"] = new_bbox masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") images = [image_0, image_1] annotations = [annotation_0, annotation_1] for image_processing_class in self.image_processor_list: # encode them image_processing = image_processing_class(format="coco_panoptic") encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_tensors="pt", return_segmentation_masks=True, ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.2625, 0.5437, 0.4688, 0.8625], [0.7719, 0.4104, 0.4531, 0.7125], [0.5000, 0.4927, 0.9969, 0.9854], [0.1688, 0.2000, 0.2063, 0.0917], [0.5492, 0.2760, 0.0578, 0.2187], [0.4992, 0.4990, 0.9984, 0.9979], ] ) expected_boxes_1 = torch.tensor( [ [0.1576, 0.3262, 0.2814, 0.5175], [0.4634, 0.2463, 0.2720, 0.4275], [0.3002, 0.2956, 0.5985, 0.5913], [0.1013, 0.1200, 0.1238, 0.0550], [0.3297, 0.1656, 0.0347, 0.1312], [0.2997, 0.2994, 0.5994, 0.5987], ] ) torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3, rtol=1e-3) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3, rtol=1e-3) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * 
postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T torch.testing.assert_close(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1, rtol=1) torch.testing.assert_close(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1, rtol=1) def test_max_width_max_height_resizing_and_pad_strategy(self): for image_processing_class in self.image_processor_list: image_1 = torch.ones([200, 100, 3], dtype=torch.uint8) # do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50 image_processor = image_processing_class( size={"max_height": 100, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50])) # do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100 image_processor = image_processing_class( size={"max_height": 300, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") # do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100 image_processor = image_processing_class( size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100} ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100])) # do_pad=True, max_height=300, max_width=100, image=200x100 -> 300x100 image_processor = image_processing_class( size={"max_height": 300, "max_width": 100}, do_pad=True, pad_size={"height": 301, "width": 101}, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101])) ### Check for batch image_2 = torch.ones([100, 150, 3], dtype=torch.uint8) # do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100 image_processor = image_processing_class( size={"max_height": 150, "max_width": 100}, do_pad=True, pad_size={"height": 150, "width": 100}, ) inputs = image_processor(images=[image_1, image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100])) def test_longest_edge_shortest_edge_resizing_strategy(self): for image_processing_class in self.image_processor_list: image_1 = torch.ones([958, 653, 3], dtype=torch.uint8) # max size is set; width < height; # do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436 image_processor = image_processing_class( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436])) image_2 = torch.ones([653, 958, 3], 
dtype=torch.uint8) # max size is set; height < width; # do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640 image_processor = image_processing_class( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640])) image_3 = torch.ones([100, 120, 3], dtype=torch.uint8) # max size is set; width == size; height > max_size; # do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98 image_processor = image_processing_class( size={"longest_edge": 118, "shortest_edge": 100}, do_pad=False, ) inputs = image_processor(images=[image_3], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118])) image_4 = torch.ones([128, 50, 3], dtype=torch.uint8) # max size is set; height == size; width < max_size; # do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128 image_processor = image_processing_class( size={"longest_edge": 256, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_4], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50])) image_5 = torch.ones([50, 50, 3], dtype=torch.uint8) # max size is set; height == width; width < max_size; # do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50 image_processor = image_processing_class( size={"longest_edge": 117, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_5], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50])) @slow @require_torch_accelerator @require_torchvision def test_fast_processor_equivalence_cpu_accelerator_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} processor = self.image_processor_list[1]() # 1. run processor on CPU encoding_cpu = processor(images=image, annotations=target, return_tensors="pt", device="cpu") # 2. 
run processor on accelerator encoding_gpu = processor(images=image, annotations=target, return_tensors="pt", device=torch_device) # verify pixel values self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape) self.assertTrue( torch.allclose( encoding_cpu["pixel_values"][0, 0, 0, :3], encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"), atol=1e-4, ) ) # verify area torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")) # verify boxes self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape) self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3 ) ) # verify image_id torch.testing.assert_close( encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu") ) # verify is_crowd torch.testing.assert_close( encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu") ) # verify class_labels self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu") ) ) # verify orig_size torch.testing.assert_close( encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu") ) # verify size torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu")) @slow @require_torch_accelerator @require_torchvision def test_fast_processor_equivalence_cpu_accelerator_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") processor = self.image_processor_list[1](format="coco_panoptic") # 1. run processor on CPU encoding_cpu = processor( images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device="cpu" ) # 2. 
run processor on accelerator encoding_gpu = processor( images=image, annotations=target, masks_path=masks_path, return_tensors="pt", device=torch_device ) # verify pixel values self.assertEqual(encoding_cpu["pixel_values"].shape, encoding_gpu["pixel_values"].shape) self.assertTrue( torch.allclose( encoding_cpu["pixel_values"][0, 0, 0, :3], encoding_gpu["pixel_values"][0, 0, 0, :3].to("cpu"), atol=1e-4, ) ) # verify area torch.testing.assert_close(encoding_cpu["labels"][0]["area"], encoding_gpu["labels"][0]["area"].to("cpu")) # verify boxes self.assertEqual(encoding_cpu["labels"][0]["boxes"].shape, encoding_gpu["labels"][0]["boxes"].shape) self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["boxes"][0], encoding_gpu["labels"][0]["boxes"][0].to("cpu"), atol=1e-3 ) ) # verify image_id torch.testing.assert_close( encoding_cpu["labels"][0]["image_id"], encoding_gpu["labels"][0]["image_id"].to("cpu") ) # verify is_crowd torch.testing.assert_close( encoding_cpu["labels"][0]["iscrowd"], encoding_gpu["labels"][0]["iscrowd"].to("cpu") ) # verify class_labels self.assertTrue( torch.allclose( encoding_cpu["labels"][0]["class_labels"], encoding_gpu["labels"][0]["class_labels"].to("cpu") ) ) # verify masks masks_sum_cpu = encoding_cpu["labels"][0]["masks"].sum() masks_sum_gpu = encoding_gpu["labels"][0]["masks"].sum() relative_error = torch.abs(masks_sum_cpu - masks_sum_gpu) / masks_sum_cpu self.assertTrue(relative_error < 1e-3) # verify orig_size torch.testing.assert_close( encoding_cpu["labels"][0]["orig_size"], encoding_gpu["labels"][0]["orig_size"].to("cpu") ) # verify size torch.testing.assert_close(encoding_cpu["labels"][0]["size"], encoding_gpu["labels"][0]["size"].to("cpu"))
transformers/tests/models/detr/test_image_processing_detr.py/0
{ "file_path": "transformers/tests/models/detr/test_image_processing_detr.py", "repo_id": "transformers", "token_count": 18325 }
551
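get_expected_values above encodes DETR's shortest-edge resize rule; a standalone restatement of that arithmetic shows where the 800x1066 shapes asserted in the slow tests come from:

```python
# Shorter side is scaled to size["shortest_edge"]; the longer side keeps the
# aspect ratio. Returns (height, width), mirroring get_expected_values above.
def expected_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# The 480x640 COCO sample with the facebook/detr-resnet-50 checkpoint default
# of shortest_edge=800 becomes 800x1066 -- the
# expected_shape = torch.Size([1, 3, 800, 1066]) asserted in the COCO tests.
assert expected_resize(480, 640, 800) == (800, 1066)
```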
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ConvNext model.""" import unittest from transformers import DINOv3ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DINOv3ConvNextModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DINOv3ConvNextModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=False, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DINOv3ConvNextConfig( num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = DINOv3ConvNextModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, ( self.batch_size, 1 + self.image_size // 32 * self.image_size // 32, self.hidden_sizes[-1], ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class DINOv3ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (DINOv3ConvNextModel,) if is_torch_available() else () pipeline_model_mapping = {"image-feature-extraction": DINOv3ConvNextModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False test_torch_exportable = True def setUp(self): self.model_tester = DINOv3ConvNextModelTester(self) self.config_tester = ConfigTester( self, config_class=DINOv3ConvNextConfig, has_text_modality=False, hidden_size=37, common_properties=["num_channels", "hidden_sizes"], ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="DINOv3ConvNext does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="DINOv3ConvNext does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="DINOv3ConvNext does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states self.assertEqual(len(hidden_states), 5) # DINOv3ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[1].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): model_name = "facebook/dinov3-convnext-tiny-pretrain-lvd1689m" model = DINOv3ConvNextModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="DINOv3ConvNext does not retain grads for first hidden state (original pixel_values)") def test_retain_grad_hidden_states_attentions(self): pass # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class DINOv3ConvNextModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("facebook/dinov3-convnext-tiny-pretrain-lvd1689m") if is_vision_available() else None ) @slow def test_inference_no_head(self): model = DINOv3ConvNextModel.from_pretrained("facebook/dinov3-convnext-tiny-pretrain-lvd1689m").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the last hidden states _, _, height, width = inputs["pixel_values"].shape expected_seq_length = (height * width) // 4 ** (model.config.num_stages + 1) + 1 # +1 for the "CLS" token expected_shape = torch.Size((1, 
expected_seq_length, model.config.hidden_sizes[-1])) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) last_layer_cls_token = outputs.pooler_output expected_slice = torch.tensor([-6.3721, 1.3008, 2.0743, -0.0800, 0.6072], device=torch_device) torch.testing.assert_close(last_layer_cls_token[0, :5], expected_slice, rtol=1e-4, atol=1e-4) last_layer_patch_tokens = outputs.last_hidden_state[:, 1:] expected_slice = torch.tensor([0.4905, -3.7135, 1.8485, -1.0403, -1.0908], device=torch_device) torch.testing.assert_close(last_layer_patch_tokens[0, 0, :5], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py/0
{ "file_path": "transformers/tests/models/dinov3_convnext/test_modeling_dinov3_convnext.py", "repo_id": "transformers", "token_count": 3752 }
552
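The sequence-length formula in test_inference_no_head above compresses the backbone geometry into one expression; here is a short sketch of that arithmetic. The stride decomposition in the comment is my reading of the ConvNeXt design (stride-4 stem, then stride-2 downsampling between stages), not something stated in the test itself:

```python
# Each spatial side shrinks by 2 ** (num_stages + 1) overall (stem stride 4,
# then num_stages - 1 stride-2 downsample layers), so the area shrinks by
# 4 ** (num_stages + 1); one pooled "CLS"-style token is then prepended.
def expected_tokens(height: int, width: int, num_stages: int = 4) -> int:
    return (height * width) // 4 ** (num_stages + 1) + 1

# A 224x224 input at the default four stages: 224 * 224 / 4**5 = 49 patch
# tokens, plus the pooled token.
assert expected_tokens(224, 224) == 50
```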
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch dots1 model.""" import gc import unittest import pytest from transformers import AutoTokenizer, Dots1Config, is_torch_available from transformers.testing_utils import ( backend_empty_cache, cleanup, require_flash_attn, require_torch, require_torch_accelerator, require_torch_gpu, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( Dots1ForCausalLM, Dots1Model, ) class Dots1ModelTester(CausalLMModelTester): config_class = Dots1Config if is_torch_available(): base_model_class = Dots1Model causal_lm_class = Dots1ForCausalLM def __init__( self, parent, n_routed_experts=8, n_shared_experts=1, n_group=1, topk_group=1, num_experts_per_tok=8, ): super().__init__(parent=parent, num_experts_per_tok=num_experts_per_tok) self.n_routed_experts = n_routed_experts self.n_shared_experts = n_shared_experts self.n_group = n_group self.topk_group = topk_group @require_torch class Dots1ModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( ( Dots1Model, Dots1ForCausalLM, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": Dots1Model, "text-generation": Dots1ForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False model_tester_class = Dots1ModelTester @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="dots.llm1 flash attention does not support right padding") @require_torch_accelerator class Dots1IntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device are we using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] def tearDown(self): # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed. 
cleanup(torch_device, gc_collect=False) @slow def test_model_15b_a2b_generation(self): EXPECTED_TEXT_COMPLETION = ( """To be or not to be, that is the question:\nWhether 'tis nobler in the mind to suffer\nThe""" ) prompt = "To be or not to" tokenizer = AutoTokenizer.from_pretrained("redmoe-ai-v1/dots.llm1.test", use_fast=False) model = Dots1ForCausalLM.from_pretrained("redmoe-ai-v1/dots.llm1.test", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) del model backend_empty_cache(torch_device) gc.collect()
transformers/tests/models/dots1/test_modeling_dots1.py/0
{ "file_path": "transformers/tests/models/dots1/test_modeling_dots1.py", "repo_id": "transformers", "token_count": 1737 }
553
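A minimal greedy-decoding sketch following the pattern of test_model_15b_a2b_generation above; the checkpoint name is the one used in the test, and running this still requires downloading it:

```python
from transformers import AutoTokenizer, Dots1ForCausalLM

repo = "redmoe-ai-v1/dots.llm1.test"
tokenizer = AutoTokenizer.from_pretrained(repo, use_fast=False)
model = Dots1ForCausalLM.from_pretrained(repo, device_map="auto")

# Place inputs on the same device as the (possibly sharded) embedding table,
# exactly as the integration test does.
input_ids = tokenizer.encode("To be or not to", return_tensors="pt")
input_ids = input_ids.to(model.model.embed_tokens.weight.device)

# do_sample=False gives deterministic greedy decoding, which is what lets the
# test compare against a fixed EXPECTED_TEXT_COMPLETION string.
generated = model.generate(input_ids, max_new_tokens=20, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```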
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Ernie4.5 model.""" import unittest from transformers import is_torch_available from transformers.testing_utils import ( Expectations, cleanup, require_torch, require_torch_accelerator, slow, torch_device, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): import torch from transformers import ( AutoTokenizer, Ernie4_5Config, Ernie4_5ForCausalLM, Ernie4_5Model, ) from transformers.models.ernie4_5.modeling_ernie4_5 import Ernie4_5RotaryEmbedding class Ernie4_5ModelTester(CausalLMModelTester): if is_torch_available(): config_class = Ernie4_5Config base_model_class = Ernie4_5Model causal_lm_class = Ernie4_5ForCausalLM @require_torch class Ernie4_5ModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( ( Ernie4_5Model, Ernie4_5ForCausalLM, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": Ernie4_5Model, "text-generation": Ernie4_5ForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False # Broken by attention refactor cc @Cyrilvallez model_tester_class = Ernie4_5ModelTester rotary_embedding_layer = Ernie4_5RotaryEmbedding # Enables RoPE tests if set # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = Ernie4_5ForCausalLM if is_torch_available() else None @require_torch_accelerator class Ernie4_5IntegrationTest(unittest.TestCase): def setup(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_ernie4_5_0p3B(self): """ An integration test for Ernie 4.5 0.3B. """ expected_texts = Expectations( { ("cuda", None): "User: Hey, are you conscious? Can you talk to me?\nAssistant: Hey! I'm here to help you with whatever you need. Are you feeling a bit overwhelmed or stressed? I'm here to listen and provide support.", } ) # fmt: skip EXPECTED_TEXT = expected_texts.get_expectation() tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-0.3B-PT", revision="refs/pr/3") model = Ernie4_5ForCausalLM.from_pretrained( "baidu/ERNIE-4.5-0.3B-PT", device_map="auto", dtype=torch.bfloat16, ) prompt = "Hey, are you conscious? Can you talk to me?" messages = [{"role": "user", "content": prompt}] text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device) generated_ids = model.generate( model_inputs.input_ids, max_new_tokens=128, do_sample=False, ) generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n") self.assertEqual(generated_text, EXPECTED_TEXT)
transformers/tests/models/ernie4_5/test_modeling_ernie4_5.py/0
{ "file_path": "transformers/tests/models/ernie4_5/test_modeling_ernie4_5.py", "repo_id": "transformers", "token_count": 1701 }
554
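For reference, the generation flow exercised by `Ernie4_5IntegrationTest.test_ernie4_5_0p3B` above can also be run outside the test harness. The snippet below is a minimal sketch adapted from that test body; it assumes the `baidu/ERNIE-4.5-0.3B-PT` checkpoint (revision `refs/pr/3`) is reachable and that an accelerator with bfloat16 support is available.

```python
# Standalone sketch of the chat-generation path from test_ernie4_5_0p3B (assumptions noted above).
import torch
from transformers import AutoTokenizer, Ernie4_5ForCausalLM

tokenizer = AutoTokenizer.from_pretrained("baidu/ERNIE-4.5-0.3B-PT", revision="refs/pr/3")
model = Ernie4_5ForCausalLM.from_pretrained(
    "baidu/ERNIE-4.5-0.3B-PT", device_map="auto", dtype=torch.bfloat16
)

messages = [{"role": "user", "content": "Hey, are you conscious? Can you talk to me?"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
model_inputs = tokenizer([text], add_special_tokens=False, return_tensors="pt").to(model.device)

# Greedy decoding, mirroring the deterministic settings used by the test.
generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True).strip("\n"))
```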
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import AutoProcessor, GotOcr2Processor, PreTrainedTokenizerFast from transformers.testing_utils import require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import GotOcr2ImageProcessor @require_vision class GotOcr2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = GotOcr2Processor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = GotOcr2ImageProcessor() tokenizer = PreTrainedTokenizerFast.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf") processor_kwargs = {} processor = GotOcr2Processor(image_processor, tokenizer, **processor_kwargs) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.img_pad_token def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_ocr_queries(self): processor = self.get_processor() image_input = self.prepare_image_inputs() inputs = processor(image_input, return_tensors="pt") self.assertEqual(inputs["input_ids"].shape, (1, 286)) self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384)) inputs = processor(image_input, return_tensors="pt", format=True) self.assertEqual(inputs["input_ids"].shape, (1, 288)) self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384)) inputs = processor(image_input, return_tensors="pt", color="red") self.assertEqual(inputs["input_ids"].shape, (1, 290)) self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384)) inputs = processor(image_input, return_tensors="pt", box=[0, 0, 100, 100]) self.assertEqual(inputs["input_ids"].shape, (1, 303)) self.assertEqual(inputs["pixel_values"].shape, (1, 3, 384, 384)) inputs = processor([image_input, image_input], return_tensors="pt", multi_page=True, format=True) self.assertEqual(inputs["input_ids"].shape, (1, 547)) self.assertEqual(inputs["pixel_values"].shape, (2, 3, 384, 384)) inputs = processor(image_input, return_tensors="pt", crop_to_patches=True, max_patches=6) self.assertEqual(inputs["input_ids"].shape, (1, 1826)) self.assertEqual(inputs["pixel_values"].shape, (7, 3, 384, 384))
transformers/tests/models/got_ocr2/test_processing_got_ocr2.py/0
{ "file_path": "transformers/tests/models/got_ocr2/test_processing_got_ocr2.py", "repo_id": "transformers", "token_count": 1249 }
555
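The processor kwargs covered by `test_ocr_queries` above (`format`, `color`, `box`, `multi_page`, `crop_to_patches`) can be tried on any image. The sketch below assumes, unlike the test (which assembles the processor from a local `GotOcr2ImageProcessor` plus the hub tokenizer), that `stepfun-ai/GOT-OCR-2.0-hf` hosts a complete processor that `AutoProcessor` can load.

```python
# Sketch of the GOT-OCR-2.0 processor calls checked by test_ocr_queries (see assumption above).
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf")
image = Image.new("RGB", (640, 480), color="white")  # stand-in for a real document image

plain = processor(image, return_tensors="pt")                        # plain OCR prompt
formatted = processor(image, return_tensors="pt", format=True)       # formatted-output prompt
boxed = processor(image, return_tensors="pt", box=[0, 0, 100, 100])  # OCR restricted to a region
patched = processor(image, return_tensors="pt", crop_to_patches=True, max_patches=6)

print(plain["input_ids"].shape, plain["pixel_values"].shape)
print(patched["pixel_values"].shape)  # first dim grows with the number of crops (7 in the test above)
```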
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import shutil import tempfile import unittest from parameterized import parameterized from transformers import AutoProcessor, AutoTokenizer, InternVLProcessor from transformers.testing_utils import require_av, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_processing_common import MODALITY_INPUT_DATA, ProcessorTesterMixin if is_torch_available(): import torch if is_vision_available(): from transformers import GotOcr2ImageProcessor, InternVLVideoProcessor @require_vision class InternVLProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = InternVLProcessor videos_input_name = "pixel_values" @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = GotOcr2ImageProcessor( do_resize=True, size={"height": 20, "width": 20}, max_patches=2, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, do_center_crop=True, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225], do_convert_rgb=True, ) video_processor = InternVLVideoProcessor( do_resize=True, size={"height": 20, "width": 20}, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225], do_convert_rgb=True, ) tokenizer = AutoTokenizer.from_pretrained("OpenGVLab/InternVL3-1B-hf", padding_side="left") processor_kwargs = cls.prepare_processor_dict() processor = InternVLProcessor( image_processor=image_processor, tokenizer=tokenizer, video_processor=video_processor, **processor_kwargs, ) processor.save_pretrained(cls.tmpdirname) cls.image_token = processor.image_token cls.video_token = processor.video_token @staticmethod def prepare_processor_dict(): return {"image_seq_length": 2} def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def get_video_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor def get_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs) @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) # Copied from tests.models.llava.test_processing_llava.LlavaProcessorTest.test_get_num_vision_tokens def test_get_num_vision_tokens(self): "Tests general functionality of the helper used internally in vLLM" processor = self.get_processor() output = processor._get_num_multimodal_tokens(image_sizes=[(100, 100), (300, 100), (500, 30)]) self.assertTrue("num_image_tokens" in output) self.assertEqual(len(output["num_image_tokens"]), 3) self.assertTrue("num_image_patches" in output) self.assertEqual(len(output["num_image_patches"]), 3) @require_av @require_torch def test_process_interleaved_images_videos(self): processor = self.get_processor() 
messages = [ [ { "role": "user", "content": [ { "type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, {"type": "text", "text": "What are the differences between these two images?"}, ], }, ], [ { "role": "user", "content": [ { "type": "video", "url": "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4", }, {"type": "text", "text": "What type of shot is the man performing?"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg", }, {"type": "text", "text": "Write a haiku for this image"}, ], } ], ] inputs_batched = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding=True, num_frames=8, ) # Process non batched inputs to check if the pixel_values and input_ids are reconstructed in the correct order when batched together images_patches_index = 0 for i, message in enumerate(messages): inputs = processor.apply_chat_template( message, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", padding=True, num_frames=8, ) # We slice with [-inputs["input_ids"].shape[1] :] as the input_ids are left padded torch.testing.assert_close( inputs["input_ids"][0], inputs_batched["input_ids"][i][-inputs["input_ids"].shape[1] :] ) torch.testing.assert_close( inputs["pixel_values"], inputs_batched["pixel_values"][ images_patches_index : images_patches_index + inputs["pixel_values"].shape[0] ], ) images_patches_index += inputs["pixel_values"].shape[0] @require_torch @require_av def test_apply_chat_template_video_frame_sampling(self): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") signature = inspect.signature(processor.__call__) if "videos" not in {*signature.parameters.keys()} or ( signature.parameters.get("videos") is not None and signature.parameters["videos"].annotation == inspect._empty ): self.skipTest("Processor doesn't accept videos at input") messages = [ [ { "role": "user", "content": [ { "type": "video", "url": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4", }, {"type": "text", "text": "What is shown in this video?"}, ], }, ] ] num_frames = 3 out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, num_frames=num_frames, return_tensors="pt", ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), num_frames) # Load with `fps` arg is not possible with InternVL (skip) # Load without any arg should use the default loading method out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 300) # Load video as a list of frames (i.e. images). 
NOTE: each frame should have same size # because we assume they come from one video messages[0][0]["content"][0] = { "type": "video", "url": [ "https://www.ilankelman.org/stopsigns/australia.jpg", "https://www.ilankelman.org/stopsigns/australia.jpg", ], } out_dict_with_video = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ) self.assertTrue(self.videos_input_name in out_dict_with_video) self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2) @require_av @parameterized.expand([(1, "pt"), (2, "pt"), (3, "pt")]) def test_apply_chat_template_video(self, batch_size: int, return_tensors: str): processor = self.get_processor() if processor.chat_template is None: self.skipTest("Processor has no chat template") if "video_processor" not in self.processor_class.attributes: self.skipTest(f"`video_processor` attribute not present in {self.processor_class}") batch_messages = [ [ { "role": "user", "content": [{"type": "text", "text": "Describe this."}], }, ] ] * batch_size # Test that jinja can be applied formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False) self.assertEqual(len(formatted_prompt), batch_size) # Test that tokenizing with template and directly with `self.tokenizer` gives same output formatted_prompt_tokenized = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_tensors="pt" ) add_special_tokens = True if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token): add_special_tokens = False tok_output = processor.tokenizer(formatted_prompt, return_tensors="pt", add_special_tokens=add_special_tokens) expected_output = tok_output.input_ids self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist()) # Test that kwargs passed to processor's `__call__` are actually used tokenized_prompt_100 = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, padding="max_length", truncation=True, return_tensors="pt", max_length=100, ) self.assertEqual(len(tokenized_prompt_100[0]), 100) # Test that `return_dict=True` returns text related inputs in the dict out_dict_text = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ) self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"])) self.assertEqual(len(out_dict_text["input_ids"]), batch_size) self.assertEqual(len(out_dict_text["attention_mask"]), batch_size) # Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict for idx, url in enumerate(MODALITY_INPUT_DATA["videos"][:batch_size]): batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": "video", "url": url}] out_dict = processor.apply_chat_template( batch_messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", num_frames=2, # by default no more than 2 frames, otherwise too slow ) self.assertTrue(self.videos_input_name in out_dict) self.assertEqual(len(out_dict["input_ids"]), batch_size) self.assertEqual(len(out_dict["attention_mask"]), batch_size) # InternVL internally collects frames from all the videos in a batch and flattens the batch dimension (B T C H W) -> (B*T C H W) then patches and removes the frames # hence output length does not equal batch size # removed hardcoded video length check video_len = 2 if 
batch_size == 1 else 3 # from experiment video_len looks like batch_size + 1 # TODO: update expected video_len calculation based on the internal processing logic of InternVLProcessor video_len = batch_size + 1 self.assertEqual(len(out_dict[self.videos_input_name]), video_len) for k in out_dict: self.assertIsInstance(out_dict[k], torch.Tensor) # Test continue from final message assistant_message = { "role": "assistant", "content": [{"type": "text", "text": "It is the sound of"}], } for batch_idx in range(batch_size): batch_messages[batch_idx] = batch_messages[batch_idx] + [assistant_message] continue_prompt = processor.apply_chat_template(batch_messages, continue_final_message=True, tokenize=False) for prompt in continue_prompt: self.assertTrue(prompt.endswith("It is the sound of")) # no `eos` token at the end
transformers/tests/models/internvl/test_processing_internvl.py/0
{ "file_path": "transformers/tests/models/internvl/test_processing_internvl.py", "repo_id": "transformers", "token_count": 6952 }
556
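The chat-template path used throughout `InternVLProcessorTest` above can be reproduced with a hub processor. The sketch below assumes `OpenGVLab/InternVL3-1B-hf` exposes a full `InternVLProcessor`; the test instead builds one from hand-configured image, video, and tokenizer components.

```python
# Sketch of processor.apply_chat_template for a single image message (assumption noted above).
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("OpenGVLab/InternVL3-1B-hf")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"},
            {"type": "text", "text": "Write a haiku for this image"},
        ],
    }
]

inputs = processor.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
)
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)
```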
# coding=utf-8 # Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest from tempfile import TemporaryDirectory import numpy as np import pytest import requests from transformers.testing_utils import ( require_torch, require_vision, ) from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, AutoTokenizer, Kosmos2_5ImageProcessor, Kosmos2_5Processor, PreTrainedTokenizerFast, ) @require_vision class Kosmos2_5ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Kosmos2_5Processor images_input_name = "flattened_patches" def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = Kosmos2_5ImageProcessor() tokenizer = AutoTokenizer.from_pretrained("ydshieh/kosmos-2.5") processor = Kosmos2_5Processor(image_processor, tokenizer) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def tearDown(self): shutil.rmtree(self.tmpdirname) def test_image_procesor_load_save_reload(self): # make sure load from Hub repo. 
-> save -> reload locally work image_processor = Kosmos2_5ImageProcessor.from_pretrained("ydshieh/kosmos-2.5") with TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = Kosmos2_5ImageProcessor.from_pretrained(tmp_dir) assert image_processor.to_dict() == reloaded_image_processor.to_dict() assert image_processor.to_json_string() == reloaded_image_processor.to_json_string() def test_save_load_pretrained_additional_features(self): processor = Kosmos2_5Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2_5Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0, ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual( processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string(), ) self.assertIsInstance(processor.image_processor, Kosmos2_5ImageProcessor) @unittest.skip(reason="kosmos-2.5 must have both image and text") def test_image_processor(self): pass @unittest.skip(reason="kosmos-2.5 must have both image and text") def test_tokenizer(self): pass def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_can_load_various_tokenizers(self): for checkpoint in ["ydshieh/kosmos-2.5"]: processor = AutoProcessor.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__) @require_torch def test_model_input_names(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() # both image and text inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), [ "flattened_patches", "attention_mask", "width", "height", "input_ids", "image_embeds_position_mask", ], ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() @require_torch @require_vision def test_image_processor_defaults_preserved_by_image_kwargs(self): # Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values" if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor", max_patches=1024, patch_size={"height": 8, "width": 8}) tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor(text=input_str, 
images=image_input) self.assertEqual(len(inputs["flattened_patches"][0][0]), 194) @require_torch @require_vision def test_kwargs_overrides_default_image_processor_kwargs(self): # Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values" if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor", max_patches=4096) tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input, max_patches=1024) self.assertEqual(len(inputs["flattened_patches"][0]), 1024) @require_torch @require_vision def test_unstructured_kwargs(self): # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor` if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_patches=1024, padding="max_length", max_length=76, ) self.assertEqual(inputs["flattened_patches"].shape[1], 1024) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch @require_vision def test_unstructured_kwargs_batched(self): # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor` if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs(batch_size=2) image_input = self.prepare_image_inputs(batch_size=2) inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_patches=1024, padding="longest", max_length=76, ) self.assertEqual(inputs["flattened_patches"].shape[1], 1024) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch @require_vision def test_structured_kwargs_nested(self): # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor` if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"max_patches": 1024}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.skip_processor_without_typed_kwargs(processor) 
self.assertEqual(inputs["flattened_patches"].shape[1], 1024) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch @require_vision def test_structured_kwargs_nested_from_dict(self): # Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor` if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"max_patches": 1024}, "text_kwargs": {"padding": "max_length", "max_length": 76}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.assertEqual(inputs["flattened_patches"].shape[1], 1024) self.assertEqual(len(inputs["input_ids"][0]), 76) @require_torch def test_full_processor(self): url = "https://huggingface.co/kirp/kosmos2_5/resolve/main/receipt_00008.png" processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2.5") texts = ["<md>", "<ocr>"] expected_input_ids = [ [100288], [100282], ] expected_attention_mask = [[1], [1]] image = Image.open(requests.get(url, stream=True).raw) # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed image_path = os.path.join(self.tmpdirname, "image.png") image.save(image_path) image = Image.open(image_path) # test single image outputs = processor(images=image, text=texts[0]) self.assertListEqual( outputs.input_ids[0].numpy().tolist(), [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[0], ) self.assertListEqual( outputs.image_embeds_position_mask[0].numpy().tolist(), [0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0].numpy().tolist(), [1, 1] + [1] * 2048 + [1] + expected_attention_mask[0], ) EXPECTED_FP_1 = [ 1.0, 2.0, -2.9527735710144043, -2.672085762023926, -2.9933173656463623, -2.905944585800171, -2.5891761779785156, -2.8751866817474365, -2.962153434753418, -2.588062047958374, ] EXPECTED_FP_200 = [ 4.0, 45.0, 1.5713728666305542, 1.584628939628601, 1.3589054346084595, 1.6515952348709106, 1.7014952898025513, 1.3731343746185303, 1.6010395288467407, 1.6607422828674316, ] self.assertTupleEqual(outputs.flattened_patches.shape, (1, 4096, 770)) np.testing.assert_allclose( outputs.flattened_patches[0][1][:10].numpy().tolist(), EXPECTED_FP_1, atol=1e-9, ) np.testing.assert_allclose( outputs.flattened_patches[0][200][:10].numpy().tolist(), EXPECTED_FP_200, atol=1e-9, ) # test a batch of images and texts, right padding outputs = processor(images=[image, image], text=texts) self.assertListEqual( outputs.input_ids[1].numpy().tolist(), [0, 100283] + [0] * 2048 + [100284] + expected_input_ids[1], ) self.assertListEqual( outputs.image_embeds_position_mask[1].numpy().tolist(), [0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[1])), ) self.assertListEqual( outputs.attention_mask[1].numpy().tolist(), [1, 1] + [1] * 2048 + [1] + expected_attention_mask[1], ) self.assertTupleEqual(outputs.flattened_patches.shape, (2, 4096, 770)) np.testing.assert_allclose( outputs.flattened_patches[1][1][:10].numpy().tolist(), EXPECTED_FP_1, atol=1e-9, ) np.testing.assert_allclose( 
outputs.flattened_patches[1][200][:10].numpy().tolist(), EXPECTED_FP_200, atol=1e-9, )
transformers/tests/models/kosmos2_5/test_processor_kosmos2_5.py/0
{ "file_path": "transformers/tests/models/kosmos2_5/test_processor_kosmos2_5.py", "repo_id": "transformers", "token_count": 6759 }
557
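The combined image-plus-text call whose output keys `test_model_input_names` asserts can be issued directly against the hub processor the tests above use. The sketch below substitutes a blank PIL image for a real receipt scan; the printed shape reflects the default processor settings observed in `test_full_processor`.

```python
# Sketch of a KOSMOS-2.5 processor call producing flattened patches plus text ids.
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("ydshieh/kosmos-2.5")
image = Image.new("RGB", (512, 512), color="white")  # placeholder for a document image

inputs = processor(text="<ocr>", images=image, return_tensors="pt")
# Keys asserted by test_model_input_names: flattened_patches, attention_mask,
# width, height, input_ids, image_embeds_position_mask.
print(sorted(inputs.keys()))
print(inputs["flattened_patches"].shape)  # (1, 4096, 770) with the default hub settings
```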
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from functools import lru_cache from transformers import AddedToken, LukeTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json") SAMPLE_MERGE_FILE = get_tests_dir("fixtures/merges.txt") SAMPLE_ENTITY_VOCAB = get_tests_dir("fixtures/test_entity_vocab.json") class LukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "studio-ousia/luke-base" tokenizer_class = LukeTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"cls_token": "<s>"} @classmethod def setUpClass(cls): super().setUpClass() cls.special_tokens_map = {"entity_token_1": "<ent>", "entity_token_2": "<ent2>"} @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, task=None, **kwargs): kwargs.update(cls.special_tokens_map) tokenizer = LukeTokenizer( vocab_file=SAMPLE_VOCAB, merges_file=SAMPLE_MERGE_FILE, entity_vocab_file=SAMPLE_ENTITY_VOCAB, task=task, **kwargs, ) return tokenizer def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.get_tokenizer() text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "Ġ", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) # , add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("studio-ousia/luke-large") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertEqual(encoded_sentence, encoded_text_from_decode) self.assertEqual(encoded_pair, encoded_pair_from_decode) def get_clean_sequence(self, tokenizer, max_length=20) -> tuple[str, list]: txt = "Beyonce lives in Los Angeles" ids = tokenizer.encode(txt, add_special_tokens=False) return txt, ids def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." 
space_encoding = tokenizer.byte_encoder[b" "[0]] # Testing encoder arguments encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(first_char, space_encoding) tokenizer.add_special_tokens({"bos_token": "<s>"}) encoded = tokenizer.encode(sequence, add_special_tokens=True) first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(first_char, space_encoding) # Testing spaces after special tokens mask = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} ) # mask token has a left space mask_ind = tokenizer.convert_tokens_to_ids(mask) sequence = "Encode <mask> sequence" sequence_nospace = "Encode <mask>sequence" encoded = tokenizer.encode(sequence) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence_nospace) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(first_char, space_encoding) @unittest.skip def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_rust_tokenizer(pretrained_name, **kwargs) tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesn't self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def test_padding_entity_inputs(self): tokenizer = self.get_tokenizer() sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." span = (15, 34) pad_id = tokenizer.entity_vocab["[PAD]"] mask_id = tokenizer.entity_vocab["[MASK]"] encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True) self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]]) # test with a sentence with no entity encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True) self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]]) def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer() sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." 
spans = [(15, 34)] entities = ["East Asian language"] with self.assertRaises(ValueError): tokenizer(sentence, entities=tuple(entities), entity_spans=spans) with self.assertRaises(TypeError): tokenizer(sentence, entities=entities, entity_spans=tuple(spans)) with self.assertRaises(ValueError): tokenizer(sentence, entities=[0], entity_spans=spans) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=[0]) with self.assertRaises(ValueError): tokenizer(sentence, entities=entities, entity_spans=spans + [(0, 9)]) def test_if_tokenize_entity_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." span = (15, 34) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[span, span]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0]) def test_if_tokenize_entity_pair_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_pair_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." # head and tail information with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0]) def test_if_tokenize_entity_span_classification_raise_error_with_invalid_inputs(self): tokenizer = self.get_tokenizer(task="entity_span_classification") sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan." with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[]) with self.assertRaises(ValueError): tokenizer(sentence, entity_spans=[0, 0, 0]) @slow @require_torch class LukeTokenizerIntegrationTests(unittest.TestCase): tokenizer_class = LukeTokenizer from_pretrained_kwargs = {"cls_token": "<s>"} def setUp(self): super().setUp() def test_single_text_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_single_text_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][9:10], spaces_between_special_tokens=False), " she") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, ] ] ) # fmt: on def test_single_text_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"] spans = [(9, 21), (30, 38), (39, 42)] encoding = tokenizer( sentence, entities=entities, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_text_pair_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") self.assertEqual( encoding["entity_ids"], [ tokenizer.entity_vocab["Ana Ivanovic"], tokenizer.entity_vocab["Thursday"], tokenizer.entity_vocab["[UNK]"], ], ) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_text_pair_only_entity_spans_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." 
spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, ) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:6], spaces_between_special_tokens=False), " Ana Ivanovic" ) self.assertEqual( tokenizer.decode(encoding["input_ids"][8:9], spaces_between_special_tokens=False), " Thursday" ) self.assertEqual(tokenizer.decode(encoding["input_ids"][11:12], spaces_between_special_tokens=False), "She") mask_id = tokenizer.entity_vocab["[MASK]"] self.assertEqual(encoding["entity_ids"], [mask_id, mask_id, mask_id]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_text_pair_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True) sentence = "Top seed Ana Ivanovic said on Thursday" sentence_pair = "She could hardly believe her luck." entities = ["Ana Ivanovic", "Thursday"] entities_pair = ["Dummy Entity"] spans = [(9, 21), (30, 38)] spans_pair = [(0, 3)] encoding = tokenizer( sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) def test_entity_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification") sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." 
) span = (39, 42) encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True) # test words self.assertEqual(len(encoding["input_ids"]), 42) self.assertEqual(len(encoding["attention_mask"]), 42) self.assertEqual(len(encoding["token_type_ids"]), 42) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday<ent> she<ent> could hardly believe her luck as a fortuitous" " netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][9:12], spaces_between_special_tokens=False), "<ent> she<ent>" ) # test entities self.assertEqual(encoding["entity_ids"], [2]) self.assertEqual(encoding["entity_attention_mask"], [1]) self.assertEqual(encoding["entity_token_type_ids"], [0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [9, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] ] ) # fmt: on def test_entity_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_classification", return_token_type_ids=True ) sentence = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped" " the new world number one avoid a humiliating second- round exit at Wimbledon ." ) # entity information span = (39, 42) encoding = tokenizer( sentence, entity_spans=[span], return_token_type_ids=True, padding="max_length", return_tensors="pt" ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 512)) self.assertEqual(encoding["attention_mask"].shape, (1, 512)) self.assertEqual(encoding["token_type_ids"].shape, (1, 512)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 1)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 1)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 1)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_pair_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
# head and tail information spans = [(9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed<ent> Ana Ivanovic<ent> said on Thursday<ent2> she<ent2> could hardly believe her luck.</s>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][3:8], spaces_between_special_tokens=False), "<ent> Ana Ivanovic<ent>", ) self.assertEqual( tokenizer.decode(encoding["input_ids"][11:14], spaces_between_special_tokens=False), "<ent2> she<ent2>" ) self.assertEqual(encoding["entity_ids"], [2, 3]) self.assertEqual(encoding["entity_attention_mask"], [1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [3, 4, 5, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [11, 12, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on def test_entity_pair_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_pair_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." # head and tail information spans = [(9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 2)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 2)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 2)) self.assertEqual( encoding["entity_position_ids"].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length) ) def test_entity_span_classification_no_padding_or_truncation(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." 
spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True) self.assertEqual( tokenizer.decode(encoding["input_ids"], spaces_between_special_tokens=False), "<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>", ) self.assertEqual(encoding["entity_ids"], [2, 2, 2]) self.assertEqual(encoding["entity_attention_mask"], [1, 1, 1]) self.assertEqual(encoding["entity_token_type_ids"], [0, 0, 0]) # fmt: off self.assertEqual( encoding["entity_position_ids"], [ [1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [3, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], ] ) # fmt: on self.assertEqual(encoding["entity_start_positions"], [1, 3, 9]) self.assertEqual(encoding["entity_end_positions"], [2, 5, 9]) def test_entity_span_classification_padding_pytorch_tensors(self): tokenizer = LukeTokenizer.from_pretrained( "studio-ousia/luke-base", task="entity_span_classification", return_token_type_ids=True ) sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck." spans = [(0, 8), (9, 21), (39, 42)] encoding = tokenizer( sentence, entity_spans=spans, return_token_type_ids=True, padding="max_length", max_length=30, max_entity_length=16, return_tensors="pt", ) # test words self.assertEqual(encoding["input_ids"].shape, (1, 30)) self.assertEqual(encoding["attention_mask"].shape, (1, 30)) self.assertEqual(encoding["token_type_ids"].shape, (1, 30)) # test entities self.assertEqual(encoding["entity_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_attention_mask"].shape, (1, 16)) self.assertEqual(encoding["entity_token_type_ids"].shape, (1, 16)) self.assertEqual(encoding["entity_position_ids"].shape, (1, 16, tokenizer.max_mention_length)) self.assertEqual(encoding["entity_start_positions"].shape, (1, 16)) self.assertEqual(encoding["entity_end_positions"].shape, (1, 16))
transformers/tests/models/luke/test_tokenization_luke.py/0
{ "file_path": "transformers/tests/models/luke/test_tokenization_luke.py", "repo_id": "transformers", "token_count": 14148 }
558
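The entity-aware encoding verified by `LukeTokenizerIntegrationTests.test_single_text_no_padding_or_truncation` above boils down to passing character-level `entity_spans` (and optionally `entities`) alongside the sentence. A minimal sketch, assuming the `studio-ousia/luke-base` checkpoint is reachable:

```python
# Sketch of LUKE's entity-aware tokenization as exercised by the integration tests above.
from transformers import LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", return_token_type_ids=True)

sentence = "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck."
entities = ["Ana Ivanovic", "Thursday", "Dummy Entity"]  # unknown entities map to [UNK]
entity_spans = [(9, 21), (30, 38), (39, 42)]             # character spans into `sentence`

encoding = tokenizer(sentence, entities=entities, entity_spans=entity_spans, return_token_type_ids=True)
print(encoding["entity_ids"])           # entity vocabulary ids
print(encoding["entity_position_ids"])  # word positions covered by each entity, padded with -1
```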
# Copyright 2022 The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MarkupLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MarkupLMForQuestionAnswering, MarkupLMForSequenceClassification, MarkupLMForTokenClassification, MarkupLMModel, ) # TODO check dependencies from transformers import MarkupLMFeatureExtractor, MarkupLMProcessor, MarkupLMTokenizer class MarkupLMModelTester: """You can also import this e.g from .test_modeling_markuplm import MarkupLMModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, max_xpath_tag_unit_embeddings=20, max_xpath_subs_unit_embeddings=30, tag_pad_id=2, subs_pad_id=2, max_depth=10, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings self.tag_pad_id = tag_pad_id self.subs_pad_id = subs_pad_id self.max_depth = max_depth def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) xpath_tags_seq = ids_tensor( [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_tag_unit_embeddings ) xpath_subs_seq = ids_tensor( [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_subs_unit_embeddings ) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) 
sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return ( config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ) def get_config(self): return MarkupLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, max_xpath_tag_unit_embeddings=self.max_xpath_tag_unit_embeddings, max_xpath_subs_unit_embeddings=self.max_xpath_subs_unit_embeddings, tag_pad_id=self.tag_pad_id, subs_pad_id=self.subs_pad_id, max_depth=self.max_depth, ) def create_and_check_model( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): model = MarkupLMModel(config=config) model.to(torch_device) model.eval() print("Configs:", model.config.tag_pad_id, model.config.subs_pad_id) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_sequence_classification( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): config.num_labels = self.num_labels model = MarkupLMForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): config.num_labels = self.num_labels model = MarkupLMForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, xpath_tags_seq, xpath_subs_seq, token_type_ids, input_mask, sequence_labels, token_labels, ): model = MarkupLMForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, xpath_tags_seq=xpath_tags_seq, xpath_subs_seq=xpath_subs_seq, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): 
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            xpath_tags_seq,
            xpath_subs_seq,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "xpath_tags_seq": xpath_tags_seq,
            "xpath_subs_seq": xpath_subs_seq,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class MarkupLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MarkupLMModel,
            MarkupLMForSequenceClassification,
            MarkupLMForTokenClassification,
            MarkupLMForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MarkupLMModel,
            "question-answering": MarkupLMForQuestionAnswering,
            "text-classification": MarkupLMForSequenceClassification,
            "token-classification": MarkupLMForTokenClassification,
            "zero-shot": MarkupLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        # ValueError: Nodes must be of type `list[str]` (single pretokenized example), or `list[list[str]]`
        # (batch of pretokenized examples).
        return True

    def setUp(self):
        self.model_tester = MarkupLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MarkupLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


def prepare_html_string():
    html_string = """
    <!DOCTYPE html>
    <html>
    <head>
    <title>Page Title</title>
    </head>
    <body>

    <h1>This is a Heading</h1>
    <p>This is a paragraph.</p>

    </body>
    </html>
    """

    return html_string


@require_torch
class MarkupLMModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_processor(self):
        # TODO use from_pretrained here
        feature_extractor = MarkupLMFeatureExtractor()
        tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base")

        return MarkupLMProcessor(feature_extractor, tokenizer)

    @slow
    def test_forward_pass_no_head(self):
        model = MarkupLMModel.from_pretrained("microsoft/markuplm-base").to(torch_device)

        processor = self.default_processor

        inputs = processor(prepare_html_string(), return_tensors="pt")
        inputs = inputs.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the last hidden states
        expected_shape = torch.Size([1, 14, 768])
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.0675, -0.0052, 0.5001], [-0.2281, 0.0802, 0.2192], [-0.0583, -0.3311, 0.1185]]
        ).to(torch_device)

        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/markuplm/test_modeling_markuplm.py/0
{ "file_path": "transformers/tests/models/markuplm/test_modeling_markuplm.py", "repo_id": "transformers", "token_count": 6212 }
559
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # Copyright 2021 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MegatronBERT model.""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class MegatronBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.embedding_size = embedding_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = 
ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return MegatronBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_megatron_bert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MegatronBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_megatron_bert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MegatronBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_megatron_bert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MegatronBertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_megatron_bert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MegatronBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_megatron_bert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MegatronBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_megatron_bert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = 
self.num_labels model = MegatronBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_megatron_bert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MegatronBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_megatron_bert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MegatronBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) # Doesn't run generation tests. 
There are interface mismatches when using `generate` -- TODO @gante all_generative_model_classes = () pipeline_model_mapping = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # test_resize_embeddings = False test_head_masking = False # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = MegatronBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_megatron_bert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs) def _long_tensor(tok_lst): return torch.tensor( tok_lst, dtype=torch.long, device=torch_device, ) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class MegatronBertModelIntegrationTests(unittest.TestCase): @slow @unittest.skip(reason="Model is not available.") def test_inference_no_head(self): directory = "nvidia/megatron-bert-uncased-345m" if "MYDIR" in os.environ: directory = os.path.join(os.environ["MYDIR"], directory) model = MegatronBertModel.from_pretrained(directory) model.to(torch_device) model.half() input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 
102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 9, 1024)) self.assertEqual(output.shape, expected_shape) expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3): for jj in range(3): a = output[0, ii, jj] b = expected[3 * ii + jj] msg = f"ii={ii} jj={jj} a={a} b={b}" self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
transformers/tests/models/megatron_bert/test_modeling_megatron_bert.py/0
{ "file_path": "transformers/tests/models/megatron_bert/test_modeling_megatron_bert.py", "repo_id": "transformers", "token_count": 7107 }
560
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Mistral3 model."""

import unittest

import accelerate
import pytest

from transformers import (
    AutoProcessor,
    Mistral3Config,
    is_torch_available,
)
from transformers.testing_utils import (
    Expectations,
    cleanup,
    require_deterministic_for_xpu,
    require_read_token,
    require_torch,
    require_torch_accelerator,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        Mistral3ForConditionalGeneration,
        Mistral3Model,
    )


class Mistral3VisionText2TextModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        image_seq_length=4,
        vision_feature_layer=-1,
        ignore_index=-100,
        image_token_index=1,
        num_channels=3,
        image_size=30,
        model_type="mistral3",
        is_training=True,
        text_config={
            "model_type": "mistral",
            "vocab_size": 99,
            "attention_dropout": 0.0,
            "hidden_act": "silu",
            "hidden_size": 32,
            "initializer_range": 0.02,
            "intermediate_size": 37,
            "max_position_embeddings": 512,
            "num_attention_heads": 4,
            "num_hidden_layers": 2,
            "num_key_value_heads": 2,
            "rms_norm_eps": 1e-05,
            "rope_theta": 1000000000.0,
            "sliding_window": None,
            "bos_token_id": 2,
            "eos_token_id": 3,
            "pad_token_id": 4,
        },
        vision_config={
            "model_type": "pixtral",
            "hidden_size": 32,
            "num_hidden_layers": 2,
            "num_attention_heads": 4,
            "intermediate_size": 37,
            "image_size": 30,
            "patch_size": 6,
            "num_channels": 3,
            "hidden_act": "gelu",
        },
    ):
        self.parent = parent
        self.ignore_index = ignore_index
        self.bos_token_id = text_config["bos_token_id"]
        self.eos_token_id = text_config["eos_token_id"]
        self.pad_token_id = text_config["pad_token_id"]
        self.image_token_index = image_token_index
        self.model_type = model_type
        self.text_config = text_config
        self.vision_config = vision_config
        self.batch_size = batch_size
        self.vision_feature_layer = vision_feature_layer
        self.is_training = is_training
        self.image_seq_length = image_seq_length
        self.num_channels = num_channels
        self.image_size = image_size
        self.seq_length = seq_length + self.image_seq_length

        self.num_hidden_layers = text_config["num_hidden_layers"]
        self.vocab_size = text_config["vocab_size"]
        self.hidden_size = text_config["hidden_size"]
        self.num_attention_heads = text_config["num_attention_heads"]

    def get_config(self):
        return Mistral3Config(
            text_config=self.text_config,
            vision_config=self.vision_config,
            model_type=self.model_type,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            image_token_index=self.image_token_index,
            image_seq_length=self.image_seq_length,
            vision_feature_layer=self.vision_feature_layer,
        )

    def prepare_config_and_inputs(self):
        config = self.get_config()
        pixel_values =
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) image_sizes = torch.tensor( [[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device ) # input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.image_token_index] = self.pad_token_id input_ids[:, : self.image_seq_length] = self.image_token_index inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "image_sizes": image_sizes, } return config, inputs_dict @require_torch class Mistral3ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Mistral3Model, Mistral3ForConditionalGeneration, ) if is_torch_available() else () ) all_generative_model_classes = (Mistral3ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "image-text-to-text": Mistral3ForConditionalGeneration, } if is_torch_available() else {} ) _is_composite = True test_headmasking = False test_pruning = False def setUp(self): self.model_tester = Mistral3VisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Mistral3Config, has_text_modality=False) def test_config(self): # overwritten from `tests/test_configuration_common.py::ConfigTester` after #36077 # TODO: avoid overwritten once there is a better fix for #36077 def check_config_can_be_init_without_params(): config = self.config_tester.config_class() self.config_tester.parent.assertIsNotNone(config) self.config_tester.check_config_can_be_init_without_params = check_config_can_be_init_without_params self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="Compile not yet supported because in LLava models") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @unittest.skip("FlashAttention only support fp16 and bf16 data type") def test_flash_attn_2_fp32_ln(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_eager_matches_fa2_generate(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_eager_matches_sdpa_generate(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_flash_attn_2_from_config(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_flash_attn_2_inference_equivalence(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip("Pixtral does not support attention interfaces.") def test_flex_attention_with_grads(self): 
pass @slow @require_torch_accelerator class Mistral3IntegrationTest(unittest.TestCase): @require_read_token def setUp(self): cleanup(torch_device, gc_collect=True) self.model_checkpoint = "mistralai/Mistral-Small-3.1-24B-Instruct-2503" self.model = Mistral3ForConditionalGeneration.from_pretrained(self.model_checkpoint, dtype=torch.bfloat16) accelerate.cpu_offload(self.model, execution_device=torch_device) def tearDown(self): cleanup(torch_device, gc_collect=True) @require_read_token def test_mistral3_integration_generate_text_only(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"') messages = [ { "role": "user", "content": [ {"type": "text", "text": "Write a haiku"}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.bfloat16) with torch.no_grad(): generate_ids = self.model.generate(**inputs, max_new_tokens=200, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): "Sure, here is a haiku for you:\n\nWhispers of the breeze,\nCherry blossoms softly fall,\nSpring's gentle embrace.", ("cuda", 8): "Sure, here is a haiku for you:\n\nWhispers of the breeze,\nCherry blossoms softly fall,\nSpring's gentle embrace.", } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @require_read_token def test_mistral3_integration_generate(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"') messages = [ { "role": "user", "content": [ {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"}, {"type": "text", "text": "Describe this image"}, ], } ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.bfloat16) with torch.no_grad(): generate_ids = self.model.generate(**inputs, max_new_tokens=20, do_sample=False) decoded_output = processor.decode( generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True ) expected_outputs = Expectations( { ("xpu", 3): "The image features two cats resting on a pink blanket. 
The cat on the left is a kitten", ("cuda", 8): 'The image features two cats lying on a pink surface, which appears to be a couch or a bed', ("rocm", (9, 4)): "The image features two cats lying on a pink surface, which appears to be a couch or a bed", ("rocm", (9, 5)): "The image features two tabby cats lying on a pink surface, which appears to be a cushion or" } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual(decoded_output, expected_output) @require_read_token @require_deterministic_for_xpu def test_mistral3_integration_batched_generate(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"') messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/ydshieh/kosmos-2.5/resolve/main/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"}, {"type": "text", "text": "Describe this image"}, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.bfloat16) output = self.model.generate(**inputs, do_sample=False, max_new_tokens=25) gen_tokens = output[:, inputs["input_ids"].shape[1] :] # Check first output decoded_output = processor.decode(gen_tokens[0], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "Calm lake's mirror gleams,\nWhispering pines stand in silence,\nPath to peace begins.", ("cuda", 8): "Wooden path to calm,\nReflections whisper secrets,\nNature's peace unfolds.", ("rocm", (9, 5)): "Calm waters reflect\nWooden path to distant shore\nSilence in the scene" } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(gen_tokens[1], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "The image depicts a vibrant urban scene in what appears to be Chinatown. The focal point is a traditional Chinese archway", ("cuda", 8): 'The image depicts a street scene in what appears to be a Chinatown district. 
The focal point is a traditional Chinese arch', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) @require_read_token @require_deterministic_for_xpu def test_mistral3_integration_batched_generate_multi_image(self): processor = AutoProcessor.from_pretrained(self.model_checkpoint) processor.chat_template = processor.chat_template.replace('strftime_now("%Y-%m-%d")', '"2025-06-20"') # Prepare inputs messages = [ [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/ydshieh/kosmos-2.5/resolve/main/view.jpg"}, {"type": "text", "text": "Write a haiku for this image"}, ], }, ], [ { "role": "user", "content": [ { "type": "image", "url": "https://huggingface.co/ydshieh/kosmos-2.5/resolve/main/Statue-of-Liberty-Island-New-York-Bay.jpg", }, { "type": "image", "url": "https://huggingface.co/ydshieh/kosmos-2.5/resolve/main/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg", }, { "type": "text", "text": "These images depict two different landmarks. Can you identify them?", }, ], }, ], ] inputs = processor.apply_chat_template( messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt" ).to(torch_device, dtype=torch.bfloat16) output = self.model.generate(**inputs, do_sample=False, max_new_tokens=25) gen_tokens = output[:, inputs["input_ids"].shape[1] :] # Check first output decoded_output = processor.decode(gen_tokens[0], skip_special_tokens=True) expected_outputs = Expectations( { ("cuda", 8): 'Calm waters reflect\nWooden path to distant shore\nSilence in the scene', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", ) # Check second output decoded_output = processor.decode(gen_tokens[1], skip_special_tokens=True) expected_outputs = Expectations( { ("xpu", 3): "Certainly! The images depict two iconic landmarks:\n\n1. The first image shows the Statue of Liberty in New York City.", ("cuda", 8): 'Certainly! The images depict two famous landmarks in the United States:\n\n1. The first image shows the Statue of Liberty,', } ) # fmt: skip expected_output = expected_outputs.get_expectation() self.assertEqual( decoded_output, expected_output, f"Decoded output: {decoded_output}\nExpected output: {expected_output}", )
transformers/tests/models/mistral3/test_modeling_mistral3.py/0
{ "file_path": "transformers/tests/models/mistral3/test_modeling_mistral3.py", "repo_id": "transformers", "token_count": 8691 }
561
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the MusicGen processor.""" import random import shutil import tempfile import unittest import numpy as np from transformers import T5Tokenizer, T5TokenizerFast from transformers.testing_utils import require_sentencepiece, require_torch from transformers.utils.import_utils import is_speech_available if is_speech_available(): from transformers import EncodecFeatureExtractor, MusicgenProcessor global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_sentencepiece class MusicgenProcessorTest(unittest.TestCase): def setUp(self): self.checkpoint = "facebook/musicgen-small" self.tmpdirname = tempfile.mkdtemp() def get_tokenizer(self, **kwargs): return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs) def get_feature_extractor(self, **kwargs): return EncodecFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = MusicgenProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = MusicgenProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = MusicgenProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, T5TokenizerFast) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = 
floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="np") input_processor = processor(raw_speech, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(sequences=predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_decode_audio(self): feature_extractor = self.get_feature_extractor(padding_side="left") tokenizer = self.get_tokenizer() processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = [floats_list((1, x))[0] for x in range(5, 20, 5)] padding_mask = processor(raw_speech).padding_mask generated_speech = np.asarray(floats_list((3, 20)))[:, None, :] decoded_audios = processor.batch_decode(generated_speech, padding_mask=padding_mask) self.assertIsInstance(decoded_audios, list) for audio in decoded_audios: self.assertIsInstance(audio, np.ndarray) self.assertTrue(decoded_audios[0].shape == (1, 10)) self.assertTrue(decoded_audios[1].shape == (1, 15)) self.assertTrue(decoded_audios[2].shape == (1, 20))
transformers/tests/models/musicgen/test_processing_musicgen.py/0
{ "file_path": "transformers/tests/models/musicgen/test_processing_musicgen.py", "repo_id": "transformers", "token_count": 2286 }
562
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OneFormer model.""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import AutoModelForImageClassification, OneFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( Expectations, is_flaky, require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OneFormerForUniversalSegmentation, OneFormerModel if is_vision_available(): from transformers import OneFormerProcessor if is_vision_available(): from PIL import Image class OneFormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, vocab_size=99, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, sequence_length=77, n_ctx=4, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.vocab_size = vocab_size self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.sequence_length = sequence_length self.n_ctx = n_ctx def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) task_inputs = ( torch.randint(high=self.vocab_size, size=(self.batch_size, self.sequence_length)).to(torch_device).long() ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) text_inputs = ( torch.randint( high=self.vocab_size, size=(self.batch_size, self.num_queries - self.n_ctx, self.sequence_length) ) .to(torch_device) .long() ) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels def get_config(self): config = OneFormerConfig( text_encoder_vocab_size=self.vocab_size, hidden_size=self.hidden_dim, num_queries=self.num_queries, num_labels=self.num_labels, encoder_feedforward_dim=32, dim_feedforward=64, encoder_layers=2, decoder_layers=2, ) config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] 
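        # Rely on the tiny in-config backbone defined above rather than a pretrained backbone checkpoint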
        config.backbone = None
        config.hidden_dim = self.hidden_dim
        config.mask_dim = self.hidden_dim
        config.conv_dim = self.hidden_dim

        config.text_encoder_width = self.hidden_dim
        config.task_seq_len = self.sequence_length
        config.max_seq_len = self.sequence_length
        config.text_encoder_context_length = self.sequence_length
        config.text_encoder_n_ctx = self.n_ctx

        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, task_inputs, _, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "task_inputs": task_inputs}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), config.encoder_layers)
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_layers - 1)

    def create_and_check_oneformer_model(
        self, config, pixel_values, task_inputs, pixel_mask, output_hidden_states=False
    ):
        with torch.no_grad():
            model = OneFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask)
            output = model(pixel_values, task_inputs=task_inputs, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_object_queries.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_hidden_states is not None)
        self.parent.assertTrue(output.encoder_hidden_states is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_oneformer_universal_segmentation_head_model(
        self, config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels
    ):
        model = OneFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_hidden_states is not None)
            self.parent.assertTrue(result.pixel_decoder_hidden_states is not None)
            self.parent.assertTrue(result.encoder_hidden_states is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask)
            result = model(pixel_values, task_inputs)

            comm_check_on_output(result)

        config.is_training = True
        model = OneFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            result = model(
                pixel_values=pixel_values,
                task_inputs=task_inputs,
                pixel_mask=pixel_mask,
                mask_labels=mask_labels,
                class_labels=class_labels,
                text_inputs=text_inputs,
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
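        # The combined segmentation loss is returned as a single-element tensor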
self.parent.assertEqual(result.loss.shape, torch.Size([1])) @require_torch class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OneFormerModel, OneFormerForUniversalSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": OneFormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = OneFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=OneFormerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @is_flaky( description="The `attention_mask` computed with `< 0.5` in `OneFormerTransformerDecoder.forward_prediction_heads` is sensitive to input values." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_oneformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=False) def test_oneformer_universal_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_oneformer_universal_segmentation_head_model(*config_and_inputs) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1:3] self.assertEqual(model_class.main_input_name, observed_main_input_name) @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_simple(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_hidden_state(self): pass @unittest.skip(reason="OneFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="OneFormer does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="OneFormer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="OneFormer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="OneFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "task_inputs"] self.assertListEqual(arg_names[:2], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in ["shi-labs/oneformer_ade20k_swin_tiny"]: model = 
OneFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "task_inputs": torch.randint(high=self.model_tester.vocab_size, size=(2, 77), device=torch_device).long(), "text_inputs": torch.randint( high=self.model_tester.vocab_size, size=(2, 6, 77), device=torch_device ).long(), "mask_labels": torch.randn((2, 150, *size), device=torch_device), "class_labels": torch.zeros(2, 150, device=torch_device).long(), } config = self.model_tester.get_config() config.is_training = True model = OneFormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.is_training = True config.contrastive_temperature = 1 configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if ( "self_attn.sampling_offsets.bias" in name or "self_attn.value_proj.weight" in name or "self_attn.output_proj.weight" in name or "self_attn.in_proj_weight" in name or "self_attn.out_proj.weight" in name or "mlp.fc1.weight" in name or "mlp.fc2.weight" in name or "text_mapper.text_encoder.positional_embedding" in name or "text_mapper.text_encoder.token_embedding.weight" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_initialization_pretrained_backbone(self): backbone_name = "microsoft/resnet-18" # load OneFormerConfig config with a pretrained backbone config = OneFormerConfig( backbone=backbone_name, use_pretrained_backbone=True, ) # load pretrained backbone backbone_model = AutoModelForImageClassification.from_pretrained(backbone_name, device_map=torch_device) def params_match(params1, params2): return all((p1 == p2).all() for p1, p2 in zip(params1, params2)) for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertTrue( params_match( backbone_model.base_model.encoder.parameters(), model.pixel_level_module.encoder.encoder.parameters(), ) ) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertTrue( params_match( backbone_model.base_model.encoder.parameters(), model.model.pixel_level_module.encoder.encoder.parameters(), ) ) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.is_training = True model = 
model_class(config) model.to(torch_device) model.train() loss = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True config.is_training = True model = model_class(config) model.to(torch_device) model.train() outputs = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_class_predictions = outputs.transformer_decoder_class_predictions transformer_decoder_class_predictions.retain_grad() transformer_decoder_mask_predictions = outputs.transformer_decoder_mask_predictions transformer_decoder_mask_predictions.retain_grad() attentions = outputs.attentions[0][0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_class_predictions.grad) self.assertIsNotNone(transformer_decoder_mask_predictions.grad) self.assertIsNotNone(attentions.grad) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) TOLERANCE = 2e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class OneFormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "shi-labs/oneformer_ade20k_swin_tiny" @cached_property def default_processor(self): return OneFormerProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = OneFormerModel.from_pretrained(self.model_checkpoints).to(torch_device) processor = self.default_processor image = prepare_img() inputs = 
processor(image, ["semantic"], return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) task_inputs_shape = inputs["task_inputs"].shape # check size self.assertEqual(task_inputs_shape, (1, 77)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = [[0.2723, 0.8280, 0.6026], [1.2699, 1.1257, 1.1444], [1.1344, 0.6153, 0.4177]] expected_slice_hidden_state = torch.tensor(expected_slice_hidden_state).to(torch_device) slice_hidden_state = outputs.encoder_hidden_states[-1][0, 0, :3, :3] torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) expected_slice_hidden_state = [[1.0581, 1.2276, 1.2003], [1.1903, 1.2925, 1.2862], [1.158, 1.2559, 1.3216]] expected_slice_hidden_state = torch.tensor(expected_slice_hidden_state).to(torch_device) slice_hidden_state = outputs.pixel_decoder_hidden_states[0][0, 0, :3, :3] torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) expectations = Expectations( { (None, None): [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]], ("cuda", 8): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]], } ) expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device) slice_hidden_state = outputs.transformer_decoder_class_predictions[0, :3, :3] torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE) def test_inference_universal_segmentation_head(self): model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, (inputs_shape[-1] + 2) // 4), ) expectations = Expectations( { (None, None): [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]], ("cuda", 8): [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1), ) expectations = Expectations( { (None, None): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]], ("cuda", 8): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) processor = self.default_processor image = prepare_img() inputs = 
processor(image, ["semantic"], return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): dummy_model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) processor = self.default_processor processor.image_processor.num_text = dummy_model.config.num_queries - dummy_model.config.text_encoder_n_ctx dummy_model.config.is_training = True model = OneFormerForUniversalSegmentation(dummy_model.config).to(torch_device).eval() del dummy_model inputs = processor( [np.zeros((3, 512, 640)), np.zeros((3, 512, 640))], ["semantic", "semantic"], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["task_inputs"] = inputs["task_inputs"].to(torch_device) inputs["text_inputs"] = inputs["text_inputs"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
transformers/tests/models/oneformer/test_modeling_oneformer.py/0
{ "file_path": "transformers/tests/models/oneformer/test_modeling_oneformer.py", "repo_id": "transformers", "token_count": 12263 }
563
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch PEGASUS-X model."""

import copy
import math
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    require_torch_fp16,
    slow,
    torch_device,
)
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import PegasusTokenizer, PegasusXConfig, PegasusXForConditionalGeneration, PegasusXModel
    from transformers.models.pegasus_x.modeling_pegasus_x import PegasusXDecoder, PegasusXEncoder


def prepare_pegasus_x_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # use the decoder mask computed above instead of silently reusing the encoder mask
        "decoder_attention_mask": decoder_attention_mask,
    }


@require_torch
class PegasusXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
        input_ids[:, -1] = self.eos_token_id  # Eos Token

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
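        # stagger_local_blocks is disabled below so local-attention block boundaries line up
        # across layers, which keeps the attention-shape expectations in these tests simple.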
        config = PegasusXConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            stagger_local_blocks=False,
        )
        inputs_dict = prepare_pegasus_x_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = PegasusXModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and next attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = PegasusXModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = PegasusXEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = PegasusXDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class PegasusXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (PegasusXModel, PegasusXForConditionalGeneration) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": PegasusXModel,
            "summarization": PegasusXForConditionalGeneration,
            "text2text-generation": PegasusXForConditionalGeneration,
            "translation": PegasusXForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = PegasusXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusXConfig)

    @unittest.skip(
        "`PegasusXGlobalLocalAttention` returns attentions as dictionary - not compatible with torchscript"
    )
    def test_torchscript_output_attentions(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (PegasusXModel, PegasusXForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    @require_torch_fp16
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = PegasusXForConditionalGeneration(config).eval().to(torch_device)
        model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class._from_config(config, attn_implementation="eager")
            config = model.config
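            # `_from_config(..., attn_implementation="eager")` may rewrite attributes on the
            # config, so re-read the config the instantiated model actually uses.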
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0]["local"].shape[-4:]),
                [
                    self.model_tester.num_attention_heads,
                    math.ceil(encoder_seq_length / model.config.block_size),
                    model.config.block_size,
                    model.config.block_size + model.config.num_global_tokens,
                ],
            )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = 5

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                if "past_key_values" in outputs:
                    correct_outlen += 1  # past_key_values have been returned

                self.assertEqual(out_len, correct_outlen)

                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0]["local"].shape[-4:]),
                [
                    self.model_tester.num_attention_heads,
                    math.ceil(encoder_seq_length / model.config.block_size),
                    model.config.block_size,
                    model.config.block_size + model.config.num_global_tokens,
                ],
            )

    def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length):
        encoder_expected_shape = (
            batch_size,
            config.num_attention_heads,
            math.ceil(prompt_length / config.block_size),
            config.block_size,
            config.block_size + config.num_global_tokens,
        )
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [layer_attentions["local"].shape for layer_attentions in attentions],
            [encoder_expected_shape] * len(attentions),
        )
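    # Block-local attention weights are reported per block rather than per token: each layer's
    # "local" entry has shape (batch, heads, ceil(seq / block_size), block_size,
    # block_size + num_global_tokens), which is why these generate-time checks differ from the
    # token-level defaults in the common tester.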
    def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length):
        encoder_expected_shape = (batch_size, self.round_up(prompt_length, config.block_size), config.hidden_size)
        self.assertIsInstance(hidden_states, tuple)
        # All layers except the last keep the hidden states padded up to a multiple of block_size
        self.assertListEqual(
            [layer_hidden_states.shape for layer_hidden_states in hidden_states[:-1]],
            [encoder_expected_shape] * (len(hidden_states) - 1),
        )
        # Only the last layer will have the hidden states truncated back to token level
        self.assertEqual(
            hidden_states[-1][0].shape,
            (batch_size, prompt_length, config.hidden_size),
        )

    def test_hidden_states_output(self):
        def _check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.round_up(seq_length, config.block_size), self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            _check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            _check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions["local"].retain_grad()
                encoder_attentions["global"].retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)
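            # PEGASUS-X encoder attentions are dicts with separate "local" and "global"
            # tensors, so gradients are checked on both entries.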
            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions["local"].grad)
                self.assertIsNotNone(encoder_attentions["global"].grad)
                self.assertIsNotNone(decoder_attentions.grad)
                self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()

            if self.has_attentions:
                attentions = outputs.attentions[0]
                attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(attentions.grad)

    @classmethod
    def round_up(cls, n, k):
        return math.ceil(n / k) * k


def assert_tensors_close(a, b, atol=1e-12, prefix=""):
    """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
    if a is None and b is None:
        return True
    try:
        if torch.allclose(a, b, atol=atol):
            return True
        # deliberately falls through to the except block below to build a helpful message
        raise
    except Exception:
        pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
        if a.numel() > 100:
            msg = f"tensor values are {pct_different:.1%} percent different."
        else:
            msg = f"{a} != {b}"
        if prefix:
            msg = prefix + ": " + msg
        raise AssertionError(msg)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class PegasusXModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-x-base")

    def test_inference_no_head(self):
        model = PegasusXModel.from_pretrained("google/pegasus-x-base").to(torch_device)
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]])
        inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[[0.0702, -0.1552, 0.1192], [0.0836, -0.1848, 0.1304], [0.0673, -0.1686, 0.1045]]], device=torch_device
        )
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)

    def test_inference_head(self):
        model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[[0.0, 9.5705185, 1.5897303], [0.0, 9.833374, 1.5828674], [0.0, 10.429961, 1.5643371]]],
            device=torch_device,
        )
        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)

    def test_seq_to_seq_generation(self):
        hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base-arxiv").to(torch_device)

        tok = PegasusTokenizer.from_pretrained("google/pegasus-x-base")
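        # The input below is the PEGASUS-X paper abstract: a single long passage that
        # exercises block-local attention with global tokens during generation.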
        batch_input = [
            "While large pretrained Transformer models have proven highly capable at tackling natural language tasks,"
            " handling long sequence inputs continues to be a significant challenge. One such task is long input"
            " summarization, where inputs are longer than the maximum input context of most pretrained models. Through"
            " an extensive set of experiments, we investigate what model architectural changes and pretraining"
            " paradigms can most efficiently adapt a pretrained Transformer for long input summarization. We find that"
            " a staggered, block-local Transformer with global encoder tokens strikes a good balance of performance"
            " and efficiency, and that an additional pretraining phase on long sequences meaningfully improves"
            " downstream summarization performance. Based on our findings, we introduce PEGASUS-X, an extension of the"
            " PEGASUS model with additional long input pretraining to handle inputs of up to 16K tokens. PEGASUS-X"
            " achieves strong performance on long input summarization tasks comparable with much larger models while"
            " adding few additional parameters and not requiring model parallelism to train."
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tok.batch_encode_plus(
            batch_input,
            max_length=512,
            padding="max_length",
            truncation="only_first",
            return_tensors="pt",
        )

        hypotheses_batch = hf.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=2,
            max_length=32,
        )

        EXPECTED = [
            "we investigate the performance of a new pretrained model for long input summarization. <n> the model is a"
            " superposition of two well -"
        ]

        generated = tok.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == EXPECTED


class PegasusXStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        max_position_embeddings=50,
        is_encoder_decoder=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.is_encoder_decoder = is_encoder_decoder

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
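        # Decoder-only configuration: is_encoder_decoder=False so PegasusXDecoder can be
        # exercised standalone, without a paired encoder.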
        config = PegasusXConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
            is_encoder_decoder=self.is_encoder_decoder,
        )

        return (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        )

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = PegasusXDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def create_and_check_decoder_model_attention_mask_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        model = PegasusXDecoder(config=config).to(torch_device).eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        half_seq_length = input_ids.shape[-1] // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, use_cache=True)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)
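    # Both cache checks above follow the same recipe: run one full forward over the concatenated
    # sequence, run a single cached step with past_key_values, then compare matching slices, e.g.
    #
    #     full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"][:, -1]
    #     step = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"][:, 0]
    #     assert torch.allclose(full, step, atol=1e-3)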
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class PegasusXStandaloneDecoderModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (PegasusXDecoder,) if is_torch_available() else ()
    test_pruning = False
    is_encoder_decoder = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = PegasusXStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=PegasusXConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip(reason="Decoder cannot keep gradients")
    def test_flex_attention_with_grads(self):
        return
transformers/tests/models/pegasus_x/test_modeling_pegasus_x.py/0
{ "file_path": "transformers/tests/models/pegasus_x/test_modeling_pegasus_x.py", "repo_id": "transformers", "token_count": 16634 }
564
# Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "microsoft/prophetnet-large-uncased"
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00e9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00e9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535a\u63a8zz"), ["ah", "\u535a", "\u63a8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["h\u00e9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"])
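    # With do_lower_case=True and strip_accents left unset, BasicTokenizer strips accents by
    # default, so the expectations below match the explicit strip_accents=True case above.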
"), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00e9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for i, token in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) @require_torch def test_prepare_batch(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) result = list(batch.input_ids.numpy()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00a0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_2 + [102]
transformers/tests/models/prophetnet/test_tokenization_prophetnet.py/0
{ "file_path": "transformers/tests/models/prophetnet/test_tokenization_prophetnet.py", "repo_id": "transformers", "token_count": 3474 }
565