Columns: text (string, lengths 5 to 631k), id (string, lengths 14 to 178), metadata (dict), __index_level_0__ (int64, 0 to 647)
# coding=utf-8 # Copyright 2025 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ConvNeXT model configuration""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class DINOv3ConvNextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DINOv3ConvNextModel`]. It is used to instantiate an DINOv3ConvNext model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the DINOv3ConvNext [facebook/dinov3-convnext-tiny-pretrain-lvd1689m](https://huggingface.co/facebook/dinov3-convnext-tiny-pretrain-lvd1689m) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_channels (`int`, *optional*, defaults to 3): The number of input channels. hidden_sizes (`list[int]`, *optional*, defaults to [96, 192, 384, 768]): Dimensionality (hidden size) at each stage. depths (`list[int]`, *optional*, defaults to [3, 3, 9, 3]): The number of layers for each stage. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. layer_scale_init_value (`float`, *optional*, defaults to 1e-06): The initial value for the layer scale. drop_path_rate (`float`, *optional*, defaults to 0.0): The drop rate for stochastic depth. image_size (`int`, *optional*, defaults to 224): The size (resolution) of input images. 
Example: ```python >>> from transformers import DINOv3ConvNextConfig, DINOv3ConvNextModel >>> # Initializing a DINOv3ConvNext (tiny variant) style configuration >>> config = DINOv3ConvNextConfig() >>> # Initializing a model (with random weights) >>> model = DINOv3ConvNextModel(config) >>> # Accessing the model config >>> config = model.config ```""" model_type = "dinov3_convnext" def __init__( self, num_channels: int = 3, hidden_sizes: Optional[list[int]] = None, depths: Optional[list[int]] = None, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-6, layer_scale_init_value: float = 1e-6, drop_path_rate: float = 0.0, image_size: int = 224, **kwargs, ): super().__init__(**kwargs) self.num_channels = num_channels self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes self.depths = [3, 3, 9, 3] if depths is None else depths self.hidden_act = hidden_act self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.layer_scale_init_value = layer_scale_init_value self.drop_path_rate = drop_path_rate self.image_size = image_size @property def num_stages(self) -> int: return len(self.hidden_sizes) __all__ = ["DINOv3ConvNextConfig"]
transformers/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py/0
{ "file_path": "transformers/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py", "repo_id": "transformers", "token_count": 1577 }
487
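The row above is the DINOv3ConvNext configuration file. As a quick illustration of how its defaults and the `num_stages` property behave, here is a minimal sketch; it assumes a transformers build that already exports `DINOv3ConvNextConfig`/`DINOv3ConvNextModel`, and the smaller 3-stage variant is purely hypothetical.

```python
from transformers import DINOv3ConvNextConfig, DINOv3ConvNextModel

# The default configuration reproduces the tiny variant: 4 stages with widths [96, 192, 384, 768].
config = DINOv3ConvNextConfig()
assert config.hidden_sizes == [96, 192, 384, 768]
assert config.depths == [3, 3, 9, 3]
assert config.num_stages == 4  # derived from len(hidden_sizes)

# A hypothetical, smaller 3-stage variant for quick experiments (randomly initialised weights).
small = DINOv3ConvNextConfig(hidden_sizes=[64, 128, 256], depths=[2, 2, 4], image_size=128)
model = DINOv3ConvNextModel(small)
print(small.num_stages)  # 3
```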
# coding=utf-8 # Copyright 2025 The rednote-hilab team and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig, layer_type_validation from ...utils import logging logger = logging.get_logger(__name__) class Dots1Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Dots1Model`]. It is used to instantiate a `dots.llm1` model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of [rednote-hilab/dots.llm1.base](https://huggingface.co/rednote-hilab/dots.llm1.base). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 152064): Vocabulary size of the model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`Dots1Model`]. hidden_size (`int`, *optional*, defaults to 4608): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 10944): Dimension of the MLP representations. moe_intermediate_size (`int`, *optional*, defaults to 1408): Dimension of the MoE representations. num_hidden_layers (`int`, *optional*, defaults to 62): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 32): Number of key/value heads for Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, Multi Head Attention (MHA) is used. If `num_key_value_heads=1`, Multi Query Attention (MQA) is used. Otherwise, Grouped Query Attention (GQA) is used. If not specified, defaults to `num_attention_heads`. n_shared_experts (`int`, *optional*, default=None): Number of shared experts. None means dense model. n_routed_experts (`int`, *optional*, default=None): Number of routed experts. None means dense model. n_group (`int`, *optional*, defaults to 1): Number of groups for routed experts. topk_group (`int`, *optional*, defaults to 1): Number of selected groups for each token (selected experts only within `topk_group` groups). num_experts_per_tok (`int`, *optional*, default=None): Number of selected experts. None means dense model. first_k_dense_replace (`int`, *optional*, defaults to 0): Number of dense layers at the beginning of the model before the first MoE layer. norm_topk_prob (`bool`, *optional*, defaults to `False`): Whether to normalize the weights of the routed experts. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string). max_position_embeddings (`int`, *optional*, defaults to 2048): Maximum sequence length the model might ever be used with. 
initializer_range (`float`, *optional*, defaults to 0.02): Standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-06): Epsilon used by the RMS normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions. Only relevant if `config.is_decoder=True`. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie the input and output word embeddings. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`dict`, *optional*): Dictionary for scaling RoPE embeddings. Supports `{"type": strategy name, "factor": scaling factor}`. attention_bias (`bool`, *optional*, defaults to `False`): Whether to use a bias in the self-attention projections. attention_dropout (`float`, *optional*, defaults to 0.0): Dropout ratio for the attention probabilities. routed_scaling_factor (`float`, *optional*, defaults to 1.0): Scaling factor for routed experts. sliding_window (`int`, *optional*, defaults to 4096): Size of the sliding window for attention. If not specified, defaults to `4096`. max_window_layers (`int`, *optional*, defaults to 62): The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any additional layer afterwards will use SWA (Sliding Window Attention). layer_types (`list`, *optional*): Attention pattern for each layer. Examples: ```python >>> from transformers import Dots1Model, Dots1Config >>> # Initializing a Dots1 style configuration >>> configuration = Dots1Config() >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "dots1" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { # TODO: only replicate attention layers when > first_k_dense_replace "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.experts.*.gate_proj": "local_colwise", "layers.*.mlp.experts.*.up_proj": "local_colwise", "layers.*.mlp.experts.*.down_proj": "local_rowwise", "layers.*.mlp.experts.*": "local", # each expert is wrapped in a module list "layers.*.mlp.shared_experts.gate_proj": "local_colwise", "layers.*.mlp.shared_experts.up_proj": "local_colwise", "layers.*.mlp.shared_experts.down_proj": "local_rowwise", "layers.*.mlp.shared_experts": "local", "layers.*.mlp.gate_proj": "local_colwise", "layers.*.mlp.up_proj": "local_colwise", "layers.*.mlp.down_proj": "local_rowwise", "layers.*.mlp": "gather", # This is the only moment where results are gathered } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size=152064, hidden_size=4608, intermediate_size=10944, moe_intermediate_size=1408, num_hidden_layers=62, num_attention_heads=32, num_key_value_heads=32, n_shared_experts=None, n_routed_experts=None, n_group=1, topk_group=1, num_experts_per_tok=None, first_k_dense_replace=0, norm_topk_prob=False, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, routed_scaling_factor=1.0, sliding_window=4096, max_window_layers=62, 
layer_types=None, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.moe_intermediate_size = moe_intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.n_shared_experts = n_shared_experts self.n_routed_experts = n_routed_experts self.num_experts_per_tok = num_experts_per_tok self.first_k_dense_replace = first_k_dense_replace self.norm_topk_prob = norm_topk_prob if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.n_group = n_group self.topk_group = topk_group self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.rope_scaling = rope_scaling self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.routed_scaling_factor = routed_scaling_factor self.sliding_window = sliding_window self.max_window_layers = max_window_layers self.layer_types = layer_types if self.layer_types is None: self.layer_types = [ "sliding_attention" if self.sliding_window is not None and i >= self.max_window_layers else "full_attention" for i in range(self.num_hidden_layers) ] layer_type_validation(self.layer_types) super().__init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) __all__ = ["Dots1Config"]
transformers/src/transformers/models/dots1/configuration_dots1.py/0
{ "file_path": "transformers/src/transformers/models/dots1/configuration_dots1.py", "repo_id": "transformers", "token_count": 4114 }
488
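The Dots1 configuration above derives a per-layer attention pattern when `layer_types` is not given: a layer uses sliding-window attention only if `sliding_window` is set and its index is `>= max_window_layers`; otherwise it uses full attention. A small sketch of that rule with deliberately tiny, hypothetical sizes; it assumes `Dots1Config` is importable from transformers.

```python
from transformers import Dots1Config

# Hypothetical tiny model: 8 decoder layers, the first 6 use full attention,
# the remaining ones fall back to sliding-window attention.
config = Dots1Config(num_hidden_layers=8, max_window_layers=6, sliding_window=1024)
print(config.layer_types)
# ['full_attention', 'full_attention', 'full_attention', 'full_attention',
#  'full_attention', 'full_attention', 'sliding_attention', 'sliding_attention']

# Without a sliding window every layer is a full-attention layer.
dense = Dots1Config(num_hidden_layers=4, sliding_window=None)
print(dense.layer_types)  # ['full_attention', 'full_attention', 'full_attention', 'full_attention']
```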
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert DPT checkpoints from the original repository. URL: https://github.com/isl-org/DPT""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_dpt_config(checkpoint_url): config = DPTConfig() if "large" in checkpoint_url: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.backbone_out_indices = [5, 11, 17, 23] config.neck_hidden_sizes = [256, 512, 1024, 1024] expected_shape = (1, 384, 384) if "ade" in checkpoint_url: config.use_batch_norm_in_fusion_residual = True config.num_labels = 150 repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text()) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} expected_shape = [1, 150, 480, 480] return config, expected_shape def remove_ignore_keys_(state_dict): ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(name): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): name = name.replace("pretrained.model", "dpt.encoder") if "pretrained.model" in name: name = name.replace("pretrained.model", "dpt.embeddings") if "patch_embed" in name: name = name.replace("patch_embed", "patch_embeddings") if "pos_embed" in name: name = name.replace("pos_embed", "position_embeddings") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "proj" in name and "project" not in name: name = name.replace("proj", "projection") if "blocks" in name: name = name.replace("blocks", "layer") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "scratch.output_conv" in name: name = name.replace("scratch.output_conv", "head") if "scratch" in name: name = name.replace("scratch", "neck") if "layer1_rn" in name: name = name.replace("layer1_rn", "convs.0") if "layer2_rn" in name: name = name.replace("layer2_rn", "convs.1") if "layer3_rn" in name: name = name.replace("layer3_rn", "convs.2") if "layer4_rn" in name: name = name.replace("layer4_rn", "convs.3") if "refinenet" in name: layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1]) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 
name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx - 4)}") if "out_conv" in name: name = name.replace("out_conv", "projection") if "resConfUnit1" in name: name = name.replace("resConfUnit1", "residual_layer1") if "resConfUnit2" in name: name = name.replace("resConfUnit2", "residual_layer2") if "conv1" in name: name = name.replace("conv1", "convolution1") if "conv2" in name: name = name.replace("conv2", "convolution2") # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0") if "pretrained.act_postprocess2.0.project.0" in name: name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0") if "pretrained.act_postprocess3.0.project.0" in name: name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0") if "pretrained.act_postprocess4.0.project.0" in name: name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0") # resize blocks if "pretrained.act_postprocess1.3" in name: name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection") if "pretrained.act_postprocess1.4" in name: name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize") if "pretrained.act_postprocess2.3" in name: name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection") if "pretrained.act_postprocess2.4" in name: name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize") if "pretrained.act_postprocess3.3" in name: name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection") if "pretrained.act_postprocess4.3" in name: name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection") if "pretrained.act_postprocess4.4" in name: name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize") if "pretrained" in name: name = name.replace("pretrained", "dpt") if "bn" in name: name = name.replace("bn", "batch_norm") if "head" in name: name = name.replace("head", "head.head") if "encoder.norm" in name: name = name.replace("encoder.norm", "layernorm") if "auxlayer" in name: name = name.replace("auxlayer", "auxiliary_head.head") return name # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config): for i in range(config.num_hidden_layers): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :] state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] 
state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name): """ Copy/paste/tweak model's weights to our DPT structure. """ # define DPT configuration based on URL config, expected_shape = get_dpt_config(checkpoint_url) # load original state_dict from URL state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") # remove certain keys remove_ignore_keys_(state_dict) # rename keys for key in state_dict.copy(): val = state_dict.pop(key) state_dict[rename_key(key)] = val # read in qkv matrices read_in_q_k_v(state_dict, config) # load HuggingFace model model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config) model.load_state_dict(state_dict) model.eval() # Check outputs on an image size = 480 if "ade" in checkpoint_url else 384 image_processor = DPTImageProcessor(size=size) image = prepare_img() encoding = image_processor(image, return_tensors="pt") # forward pass outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth # Assert logits expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]) if "ade" in checkpoint_url: expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]) assert outputs.shape == torch.Size(expected_shape) assert ( torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3], expected_slice) ) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing model to hub...") model.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, ) image_processor.push_to_hub( repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) parser.add_argument( "--model_name", default="dpt-large", type=str, required=False, help="Name of the model, in case you're pushing to the hub.", ) args = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
transformers/src/transformers/models/dpt/convert_dpt_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/dpt/convert_dpt_to_pytorch.py", "repo_id": "transformers", "token_count": 4983 }
489
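One of the less obvious steps in the DPT conversion script above is the refinenet renaming: the original DPT numbers its fusion blocks 1 to 4 while the HF `neck.fusion_stage.layers` are indexed 0 to 3 in the opposite order, hence the `abs(layer_idx - 4)` trick. A standalone sketch of just that mapping (the helper name `rename_refinenet` is ours, for illustration only):

```python
def rename_refinenet(name: str) -> str:
    # At this point in the script "scratch" has already been renamed to "neck",
    # so keys look like "neck.refinenet3.out_conv.weight".
    prefix = "neck.refinenet"
    layer_idx = int(name[len(prefix)])  # single digit 1..4
    # refinenet4 -> fusion_stage.layers.0, refinenet3 -> 1, refinenet2 -> 2, refinenet1 -> 3
    return name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx - 4)}")

for i in range(1, 5):
    print(rename_refinenet(f"neck.refinenet{i}.resConfUnit1.conv1.weight"))
# neck.fusion_stage.layers.3.resConfUnit1.conv1.weight
# neck.fusion_stage.layers.2.resConfUnit1.conv1.weight
# neck.fusion_stage.layers.1.resConfUnit1.conv1.weight
# neck.fusion_stage.layers.0.resConfUnit1.conv1.weight
```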
# coding=utf-8 # Copyright 2023 Google Research, Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch EfficientNet model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from .configuration_efficientnet import EfficientNetConfig logger = logging.get_logger(__name__) def round_filters(config: EfficientNetConfig, num_channels: int): r""" Round number of filters based on depth multiplier. """ divisor = config.depth_divisor num_channels *= config.width_coefficient new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_dim < 0.9 * num_channels: new_dim += divisor return int(new_dim) def correct_pad(kernel_size: Union[int, tuple], adjust: bool = True): r""" Utility function to get the tuple padding value for the depthwise convolution. Args: kernel_size (`int` or `tuple`): Kernel size of the convolution layers. adjust (`bool`, *optional*, defaults to `True`): Adjusts padding value to apply to right and bottom sides of the input. """ if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) correct = (kernel_size[0] // 2, kernel_size[1] // 2) if adjust: return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) else: return (correct[1], correct[1], correct[0], correct[0]) class EfficientNetEmbeddings(nn.Module): r""" A module that corresponds to the stem module of the original work. 
""" def __init__(self, config: EfficientNetConfig): super().__init__() self.out_dim = round_filters(config, 32) self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) self.convolution = nn.Conv2d( config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False ) self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) self.activation = ACT2FN[config.hidden_act] def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: features = self.padding(pixel_values) features = self.convolution(features) features = self.batchnorm(features) features = self.activation(features) return features class EfficientNetDepthwiseConv2d(nn.Conv2d): def __init__( self, in_channels, depth_multiplier=1, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode="zeros", ): out_channels = in_channels * depth_multiplier super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode, ) class EfficientNetExpansionLayer(nn.Module): r""" This corresponds to the expansion phase of each block in the original implementation. """ def __init__(self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int): super().__init__() self.expand_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False, ) self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) self.expand_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: # Expand phase hidden_states = self.expand_conv(hidden_states) hidden_states = self.expand_bn(hidden_states) hidden_states = self.expand_act(hidden_states) return hidden_states class EfficientNetDepthwiseLayer(nn.Module): r""" This corresponds to the depthwise convolution phase of each block in the original implementation. """ def __init__( self, config: EfficientNetConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool, ): super().__init__() self.stride = stride conv_pad = "valid" if self.stride == 2 else "same" padding = correct_pad(kernel_size, adjust=adjust_padding) self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) self.depthwise_conv = EfficientNetDepthwiseConv2d( in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False ) self.depthwise_norm = nn.BatchNorm2d( num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.depthwise_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: # Depthwise convolution if self.stride == 2: hidden_states = self.depthwise_conv_pad(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_norm(hidden_states) hidden_states = self.depthwise_act(hidden_states) return hidden_states class EfficientNetSqueezeExciteLayer(nn.Module): r""" This corresponds to the Squeeze and Excitement phase of each block in the original implementation. 
""" def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False): super().__init__() self.dim = expand_dim if expand else in_dim self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) self.reduce = nn.Conv2d( in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding="same", ) self.expand = nn.Conv2d( in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding="same", ) self.act_reduce = ACT2FN[config.hidden_act] self.act_expand = nn.Sigmoid() def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: inputs = hidden_states hidden_states = self.squeeze(hidden_states) hidden_states = self.reduce(hidden_states) hidden_states = self.act_reduce(hidden_states) hidden_states = self.expand(hidden_states) hidden_states = self.act_expand(hidden_states) hidden_states = torch.mul(inputs, hidden_states) return hidden_states class EfficientNetFinalBlockLayer(nn.Module): r""" This corresponds to the final phase of each block in the original implementation. """ def __init__( self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool ): super().__init__() self.apply_dropout = stride == 1 and not id_skip self.project_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_dim, kernel_size=1, padding="same", bias=False, ) self.project_bn = nn.BatchNorm2d( num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.dropout = nn.Dropout(p=drop_rate) def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: hidden_states = self.project_conv(hidden_states) hidden_states = self.project_bn(hidden_states) if self.apply_dropout: hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + embeddings return hidden_states class EfficientNetBlock(nn.Module): r""" This corresponds to the expansion and depthwise convolution phase of each block in the original implementation. Args: config ([`EfficientNetConfig`]): Model configuration class. in_dim (`int`): Number of input channels. out_dim (`int`): Number of output channels. stride (`int`): Stride size to be used in convolution layers. expand_ratio (`int`): Expand ratio to set the output dimensions for the expansion and squeeze-excite layers. kernel_size (`int`): Kernel size for the depthwise convolution layer. drop_rate (`float`): Dropout rate to be used in the final phase of each block. id_skip (`bool`): Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase of each block. Set to `True` for the first block of each stage. adjust_padding (`bool`): Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution operation, set to `True` for inputs with odd input sizes. 
""" def __init__( self, config: EfficientNetConfig, in_dim: int, out_dim: int, stride: int, expand_ratio: int, kernel_size: int, drop_rate: float, id_skip: bool, adjust_padding: bool, ): super().__init__() self.expand_ratio = expand_ratio self.expand = self.expand_ratio != 1 expand_in_dim = in_dim * expand_ratio if self.expand: self.expansion = EfficientNetExpansionLayer( config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride ) self.depthwise_conv = EfficientNetDepthwiseLayer( config=config, in_dim=expand_in_dim if self.expand else in_dim, stride=stride, kernel_size=kernel_size, adjust_padding=adjust_padding, ) self.squeeze_excite = EfficientNetSqueezeExciteLayer( config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand ) self.projection = EfficientNetFinalBlockLayer( config=config, in_dim=expand_in_dim if self.expand else in_dim, out_dim=out_dim, stride=stride, drop_rate=drop_rate, id_skip=id_skip, ) def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: embeddings = hidden_states # Expansion and depthwise convolution phase if self.expand_ratio != 1: hidden_states = self.expansion(hidden_states) hidden_states = self.depthwise_conv(hidden_states) # Squeeze and excite phase hidden_states = self.squeeze_excite(hidden_states) hidden_states = self.projection(embeddings, hidden_states) return hidden_states class EfficientNetEncoder(nn.Module): r""" Forward propagates the embeddings through each EfficientNet block. Args: config ([`EfficientNetConfig`]): Model configuration class. """ def __init__(self, config: EfficientNetConfig): super().__init__() self.config = config self.depth_coefficient = config.depth_coefficient def round_repeats(repeats): # Round number of block repeats based on depth multiplier. return int(math.ceil(self.depth_coefficient * repeats)) num_base_blocks = len(config.in_channels) num_blocks = sum(round_repeats(n) for n in config.num_block_repeats) curr_block_num = 0 blocks = [] for i in range(num_base_blocks): in_dim = round_filters(config, config.in_channels[i]) out_dim = round_filters(config, config.out_channels[i]) stride = config.strides[i] kernel_size = config.kernel_sizes[i] expand_ratio = config.expand_ratios[i] for j in range(round_repeats(config.num_block_repeats[i])): id_skip = j == 0 stride = 1 if j > 0 else stride in_dim = out_dim if j > 0 else in_dim adjust_padding = curr_block_num not in config.depthwise_padding drop_rate = config.drop_connect_rate * curr_block_num / num_blocks block = EfficientNetBlock( config=config, in_dim=in_dim, out_dim=out_dim, stride=stride, kernel_size=kernel_size, expand_ratio=expand_ratio, drop_rate=drop_rate, id_skip=id_skip, adjust_padding=adjust_padding, ) blocks.append(block) curr_block_num += 1 self.blocks = nn.ModuleList(blocks) self.top_conv = nn.Conv2d( in_channels=out_dim, out_channels=round_filters(config, 1280), kernel_size=1, padding="same", bias=False, ) self.top_bn = nn.BatchNorm2d( num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.top_activation = ACT2FN[config.hidden_act] def forward( self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> BaseModelOutputWithNoAttention: all_hidden_states = (hidden_states,) if output_hidden_states else None for block in self.blocks: hidden_states = block(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = self.top_conv(hidden_states) hidden_states = self.top_bn(hidden_states) 
hidden_states = self.top_activation(hidden_states) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) @auto_docstring class EfficientNetPreTrainedModel(PreTrainedModel): config: EfficientNetConfig base_model_prefix = "efficientnet" main_input_name = "pixel_values" _no_split_modules = [] def _init_weights(self, module: nn.Module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() @auto_docstring class EfficientNetModel(EfficientNetPreTrainedModel): def __init__(self, config: EfficientNetConfig): super().__init__(config) self.config = config self.embeddings = EfficientNetEmbeddings(config) self.encoder = EfficientNetEncoder(config) # Final pooling layer if config.pooling_type == "mean": self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) elif config.pooling_type == "max": self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) else: raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Apply pooling last_hidden_state = encoder_outputs[0] pooled_output = self.pooler(last_hidden_state) # Reshape (batch_size, 1280, 1 , 1) -> (batch_size, 1280) pooled_output = pooled_output.reshape(pooled_output.shape[:2]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @auto_docstring( custom_intro=""" EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" ) class EfficientNetForImageClassification(EfficientNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.efficientnet = EfficientNetModel(config) # Classifier head self.dropout = nn.Dropout(p=config.dropout_rate) self.classifier = nn.Linear(config.hidden_dim, self.num_labels) if self.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.efficientnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) pooled_output = outputs.pooler_output if return_dict else outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) __all__ = ["EfficientNetForImageClassification", "EfficientNetModel", "EfficientNetPreTrainedModel"]
transformers/src/transformers/models/efficientnet/modeling_efficientnet.py/0
{ "file_path": "transformers/src/transformers/models/efficientnet/modeling_efficientnet.py", "repo_id": "transformers", "token_count": 9357 }
490
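The `round_filters` helper in the EfficientNet model above implements the usual compound-scaling rule: multiply the channel count by the width coefficient, snap to the nearest multiple of `depth_divisor`, and never round down by more than 10%. Below is a free-standing sketch of the same arithmetic; the default coefficients are illustrative (in the model they come from `EfficientNetConfig`).

```python
def round_filters(num_channels: int, width_coefficient: float = 1.0, depth_divisor: int = 8) -> int:
    # Scale by the width coefficient, then round to the nearest multiple of depth_divisor.
    num_channels *= width_coefficient
    new_dim = max(depth_divisor, int(num_channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    # Make sure rounding down never removes more than 10% of the channels.
    if new_dim < 0.9 * num_channels:
        new_dim += depth_divisor
    return int(new_dim)

print(round_filters(32))                         # 32  (stem width with B0-style coefficients)
print(round_filters(32, width_coefficient=1.2))  # 40  (B3-style width multiplier: 38.4 rounds to 40)
```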
# coding=utf-8 # Copyright 2025 Mobile Perception Systems Lab at TU/e and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch EoMT model.""" import math from dataclasses import dataclass from typing import Optional import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2FN from ...file_utils import ( ModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( auto_docstring, can_return_tuple, logging, ) from ..dinov2.modeling_dinov2 import ( Dinov2Embeddings, Dinov2Layer, Dinov2LayerScale, Dinov2PatchEmbeddings, ) from ..mask2former.modeling_mask2former import Mask2FormerForUniversalSegmentation, Mask2FormerLoss from ..siglip.modeling_siglip import SiglipAttention from ..vit.configuration_vit import ViTConfig logger = logging.get_logger(__name__) class EomtConfig(ViTConfig): r""" This is the configuration class to store the configuration of a [`EomtForUniversalSegmentation`]. It is used to instantiate an EoMT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the EoMT [tue-mps/coco_panoptic_eomt_large_640](https://huggingface.co/tue-mps/coco_panoptic_eomt_large_640) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the hidden representations. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads in each attention layer. mlp_ratio (`int`, *optional*, defaults to 4): Ratio of the MLP hidden dimensionality to the hidden size. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 640): The size (resolution) of each input image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. layerscale_value (`float`, *optional*, defaults to 1.0): Initial value for the LayerScale parameter. drop_path_rate (`float`, *optional*, defaults to 0.0): The stochastic depth rate (drop path) used during training. 
num_upscale_blocks (`int`, *optional*, defaults to 2): Number of upsampling blocks used in the decoder or segmentation head. attention_dropout (`float`, *optional*, defaults to 0.0): Dropout probability applied after attention projection. use_swiglu_ffn (`bool`, *optional*, defaults to `False`): Whether to use the SwiGLU feedforward neural network. num_blocks (`int`, *optional*, defaults to 4): Number of feature blocks or stages in the architecture. no_object_weight (`float`, *optional*, defaults to 0.1): Loss weight for the 'no object' class in panoptic/instance segmentation. class_weight (`float`, *optional*, defaults to 2.0): Loss weight for classification targets. mask_weight (`float`, *optional*, defaults to 5.0): Loss weight for mask prediction. dice_weight (`float`, *optional*, defaults to 5.0): Loss weight for the dice loss component. train_num_points (`int`, *optional*, defaults to 12544): Number of points to sample for mask loss computation during training. oversample_ratio (`float`, *optional*, defaults to 3.0): Oversampling ratio used in point sampling for mask training. importance_sample_ratio (`float`, *optional*, defaults to 0.75): Ratio of points to sample based on importance during training. num_queries (`int`, *optional*, defaults to 200): Number of object queries in the Transformer. num_register_tokens (`int`, *optional*, defaults to 4): Number of learnable register tokens added to the transformer input. Example: ```python >>> from transformers import EomtConfig, EomtForUniversalSegmentation >>> # Initialize configuration >>> config = EomtConfig() >>> # Initialize model >>> model = EomtForUniversalSegmentation(config) >>> # Access config >>> config = model.config ```""" model_type = "eomt" def __init__( self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, mlp_ratio=4, hidden_act="gelu", hidden_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, image_size=640, patch_size=16, num_channels=3, layerscale_value=1.0, drop_path_rate=0.0, num_upscale_blocks=2, attention_dropout=0.0, use_swiglu_ffn=False, num_blocks=4, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, num_queries=200, num_register_tokens=4, **kwargs, ): super().__init__( hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, hidden_dropout_prob=hidden_dropout_prob, hidden_act=hidden_act, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, image_size=image_size, patch_size=patch_size, num_channels=num_channels, **kwargs, ) del self.intermediate_size del self.qkv_bias del self.pooler_act del self.pooler_output_size del self.encoder_stride del self.attention_probs_dropout_prob self.mlp_ratio = mlp_ratio self.attention_dropout = attention_dropout self.layerscale_value = layerscale_value self.drop_path_rate = drop_path_rate self.num_upscale_blocks = num_upscale_blocks self.use_swiglu_ffn = use_swiglu_ffn self.num_blocks = num_blocks self.no_object_weight = no_object_weight self.class_weight = class_weight self.mask_weight = mask_weight self.dice_weight = dice_weight self.train_num_points = train_num_points self.oversample_ratio = oversample_ratio self.importance_sample_ratio = importance_sample_ratio self.num_queries = num_queries self.num_register_tokens = num_register_tokens @dataclass @auto_docstring( custom_intro=""" Class for outputs of 
[`EomtForUniversalSegmentationOutput`]. This output can be directly passed to [`~EomtImageProcessor.post_process_semantic_segmentation`] or [`~EomtImageProcessor.post_process_instance_segmentation`] or [`~EomtImageProcessor.post_process_panoptic_segmentation`] to compute final segmentation maps. Please, see [`~EomtImageProcessor] for details regarding usage. """ ) class EomtForUniversalSegmentationOutput(ModelOutput): r""" loss (`torch.Tensor`, *optional*): The computed loss, returned when labels are present. class_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each query. Note the `+ 1` is needed because we incorporate the null class. masks_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each query. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last layer. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states all layers of the model. attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tuple(torch.FloatTensor)` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Self and Cross Attentions weights from transformer decoder. patch_offsets (`list[torch.Tensor]`, *optional*): list of tuples indicating the image index and start and end positions of patches for semantic segementation. 
""" loss: Optional[torch.FloatTensor] = None class_queries_logits: Optional[torch.FloatTensor] = None masks_queries_logits: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None patch_offsets: Optional[list[torch.Tensor]] = None class EomtLoss(Mask2FormerLoss): pass class EomtPatchEmbeddings(Dinov2PatchEmbeddings): pass class EomtEmbeddings(Dinov2Embeddings, nn.Module): def __init__(self, config: EomtConfig) -> None: Dinov2Embeddings().__init__() self.config = config self.patch_size = config.patch_size self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size)) self.patch_embeddings = EomtPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches self.dropout = nn.Dropout(config.hidden_dropout_prob) self.num_prefix_tokens = 1 + config.num_register_tokens # 1 for [CLS] self.position_embeddings = nn.Embedding(num_patches, config.hidden_size) self.register_buffer("position_ids", torch.arange(num_patches).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self): raise AttributeError("Not needed for Eomt Model") def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: batch_size, _, _, _ = pixel_values.shape target_dtype = self.patch_embeddings.projection.weight.dtype embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype)) cls_tokens = self.cls_token.expand(batch_size, -1, -1) register_tokens = self.register_tokens.expand(batch_size, -1, -1) embeddings = embeddings + self.position_embeddings(self.position_ids) embeddings = torch.cat([cls_tokens, register_tokens, embeddings], dim=1) embeddings = self.dropout(embeddings) return embeddings class EomtAttention(SiglipAttention): pass class EomtLayerScale(Dinov2LayerScale): pass class EomtLayer(Dinov2Layer): pass class EomtLayerNorm2d(nn.LayerNorm): def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: hidden_state = hidden_state.permute(0, 2, 3, 1) hidden_state = F.layer_norm(hidden_state, self.normalized_shape, self.weight, self.bias, self.eps) hidden_state = hidden_state.permute(0, 3, 1, 2) return hidden_state class EomtScaleLayer(nn.Module): def __init__(self, config: EomtConfig): super().__init__() hidden_size = config.hidden_size self.conv1 = nn.ConvTranspose2d(hidden_size, hidden_size, kernel_size=2, stride=2) self.activation = ACT2FN[config.hidden_act] self.conv2 = nn.Conv2d( hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size, bias=False, ) self.layernorm2d = EomtLayerNorm2d(hidden_size) def forward(self, hidden_states: torch.tensor) -> torch.Tensor: hidden_states = self.conv1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.conv2(hidden_states) hidden_states = self.layernorm2d(hidden_states) return hidden_states class EomtScaleBlock(nn.Module): def __init__(self, config: EomtConfig): super().__init__() self.num_blocks = config.num_upscale_blocks self.block = nn.ModuleList([EomtScaleLayer(config) for _ in range(self.num_blocks)]) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: for block in self.block: hidden_states = block(hidden_states) return hidden_states class EomtMaskHead(nn.Module): def __init__(self, config: EomtConfig): super().__init__() 
hidden_size = config.hidden_size self.fc1 = nn.Linear(hidden_size, hidden_size) self.fc2 = nn.Linear(hidden_size, hidden_size) self.fc3 = nn.Linear(hidden_size, hidden_size) self.activation = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.activation(self.fc1(hidden_states)) hidden_states = self.activation(self.fc2(hidden_states)) hidden_states = self.fc3(hidden_states) return hidden_states @auto_docstring class EomtPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: EomtConfig base_model_prefix = "eomt" main_input_name = "pixel_values" supports_gradient_checkpointing = False _no_split_modules = ["EomtLayer"] _supports_sdpa = True _supports_flash_attn = True def _init_weights(self, module: nn.Module) -> None: std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 nn.init.uniform_(module.bias, -bound, bound) elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=1) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, EomtLayerScale): if hasattr(module, "lambda1"): module.lambda1.data.fill_(self.config.layerscale_value) elif isinstance(module, EomtEmbeddings): module.cls_token.data = nn.init.trunc_normal_( module.cls_token.data.to(torch.float32), mean=0.0, std=std ).to(module.cls_token.dtype) module.register_tokens.data.zero_() @auto_docstring( custom_intro=""" The EoMT Model with head on top for instance/semantic/panoptic segmentation. 
""" ) class EomtForUniversalSegmentation(Mask2FormerForUniversalSegmentation, nn.Module): def __init__(self, config: EomtConfig) -> None: nn.Module().__init__(config) self.config = config self.num_hidden_layers = config.num_hidden_layers self.embeddings = EomtEmbeddings(config) self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.query = nn.Embedding(config.num_queries, config.hidden_size) self.layers = nn.ModuleList([EomtLayer(config) for _ in range(config.num_hidden_layers)]) self.upscale_block = EomtScaleBlock(config) self.mask_head = EomtMaskHead(config) self.class_predictor = nn.Linear(config.hidden_size, config.num_labels + 1) self.grid_size = (config.image_size // config.patch_size, config.image_size // config.patch_size) self.weight_dict: dict[str, float] = { "loss_cross_entropy": config.class_weight, "loss_mask": config.mask_weight, "loss_dice": config.dice_weight, } self.criterion = EomtLoss(config=config, weight_dict=self.weight_dict) self.register_buffer("attn_mask_probs", torch.ones(config.num_blocks)) self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def get_auxiliary_logits(self): raise AttributeError("Note needed for Eomt Model.") def predict(self, logits: torch.Tensor): query_tokens = logits[:, : self.config.num_queries, :] class_logits = self.class_predictor(query_tokens) prefix_tokens = logits[:, self.config.num_queries + self.embeddings.num_prefix_tokens :, :] prefix_tokens = prefix_tokens.transpose(1, 2) prefix_tokens = prefix_tokens.reshape(prefix_tokens.shape[0], -1, *self.grid_size) query_tokens = self.mask_head(query_tokens) prefix_tokens = self.upscale_block(prefix_tokens) mask_logits = torch.einsum("bqc, bchw -> bqhw", query_tokens, prefix_tokens) return mask_logits, class_logits @staticmethod def _disable_attention_mask(attn_mask, prob, num_query_tokens, encoder_start_tokens, device): if prob < 1: # Generate random queries to disable based on the probs random_queries = torch.rand(attn_mask.shape[0], num_query_tokens, device=device) > prob # Disable attention to the query tokens, considering the prefix tokens attn_mask[:, :num_query_tokens, encoder_start_tokens:][random_queries] = 1 return attn_mask @auto_docstring @can_return_tuple def forward( self, pixel_values: Tensor, mask_labels: Optional[list[Tensor]] = None, class_labels: Optional[list[Tensor]] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, patch_offsets: Optional[list[Tensor]] = None, ): r""" mask_labels (`list[torch.Tensor]`, *optional*): list of mask labels of shape `(num_labels, height, width)` to be fed to a model class_labels (`list[torch.LongTensor]`, *optional*): list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. patch_offsets (`list[torch.Tensor]`, *optional*): list of tuples indicating the image index and start and end positions of patches for semantic segementation. 
""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None masks_queries_logits_per_layer, class_queries_logits_per_layer = (), () attention_mask = None if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) for idx, layer_module in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if idx == self.num_hidden_layers - self.config.num_blocks: query = self.query.weight[None, :, :].expand(hidden_states.shape[0], -1, -1).to(hidden_states.device) hidden_states = torch.cat((query, hidden_states), dim=1) if idx >= self.num_hidden_layers - self.config.num_blocks and ( self.training or self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks] > 0 ): norm_hidden_states = self.layernorm(hidden_states) masks_queries_logits, class_queries_logits = self.predict(norm_hidden_states) masks_queries_logits_per_layer += (masks_queries_logits,) class_queries_logits_per_layer += (class_queries_logits,) attention_mask = torch.ones( hidden_states.shape[0], hidden_states.shape[1], hidden_states.shape[1], device=hidden_states.device, dtype=torch.bool, ) interpolated_logits = F.interpolate(masks_queries_logits, size=self.grid_size, mode="bilinear") interpolated_logits = interpolated_logits.view( interpolated_logits.size(0), interpolated_logits.size(1), -1 ) num_query_tokens = self.config.num_queries encoder_start_tokens = num_query_tokens + self.embeddings.num_prefix_tokens # Set attention mask for queries to focus on encoder tokens based on interpolated logits attention_mask[:, :num_query_tokens, encoder_start_tokens:] = interpolated_logits > 0 # Disable attention mask for random query tokens. attention_mask = self._disable_attention_mask( attention_mask, prob=self.attn_mask_probs[idx - self.num_hidden_layers + self.config.num_blocks], num_query_tokens=num_query_tokens, encoder_start_tokens=encoder_start_tokens, device=attention_mask.device, ) # Expand attention mask to 4d mask. 
attention_mask = attention_mask[:, None, ...].expand(-1, self.config.num_attention_heads, -1, -1) attention_mask = attention_mask.float().masked_fill(~attention_mask, -1e9) layer_outputs = layer_module(hidden_states, attention_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) sequence_output = self.layernorm(hidden_states) if output_hidden_states: all_hidden_states += (sequence_output,) masks_queries_logits, class_queries_logits = self.predict(sequence_output) masks_queries_logits_per_layer += (masks_queries_logits,) class_queries_logits_per_layer += (class_queries_logits,) loss = None if mask_labels is not None and class_labels is not None: loss = 0.0 for masks_queries_logits, class_queries_logits in zip( masks_queries_logits_per_layer, class_queries_logits_per_layer ): loss_dict = self.get_loss_dict( masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, mask_labels=mask_labels, class_labels=class_labels, auxiliary_predictions=None, ) loss += self.get_loss(loss_dict) return EomtForUniversalSegmentationOutput( loss=loss, masks_queries_logits=masks_queries_logits, class_queries_logits=class_queries_logits, last_hidden_state=sequence_output, hidden_states=all_hidden_states, attentions=all_attentions, patch_offsets=patch_offsets, ) __all__ = ["EomtConfig", "EomtPreTrainedModel", "EomtForUniversalSegmentation"]
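A quick illustrative aside on the `predict` method above: the `torch.einsum("bqc, bchw -> bqhw", ...)` contraction dots each query's channel vector against the channel vector at every spatial location, yielding one dense mask-logit map per query. The sketch below uses made-up toy shapes (none of the names or sizes come from `EomtConfig` or a real checkpoint):

```python
# Toy illustration of the einsum used in EomtForUniversalSegmentation.predict;
# all shapes are invented for the example.
import torch

batch, num_queries, channels, height, width = 2, 5, 16, 8, 8
query_tokens = torch.randn(batch, num_queries, channels)      # per-query embeddings from the mask head
pixel_features = torch.randn(batch, channels, height, width)  # upscaled patch feature map

mask_logits = torch.einsum("bqc, bchw -> bqhw", query_tokens, pixel_features)
print(mask_logits.shape)  # torch.Size([2, 5, 8, 8]) -> one height x width logit map per query
```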
transformers/src/transformers/models/eomt/modular_eomt.py/0
{ "file_path": "transformers/src/transformers/models/eomt/modular_eomt.py", "repo_id": "transformers", "token_count": 10725 }
491
# coding=utf-8 # Copyright 2022 Meta and The HuggingFace Inc. team. All rights reserved. # Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ESM model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, can_return_tuple, logging from .configuration_esm import EsmConfig if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) def rotate_half(x): x1, x2 = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(x, cos, sin): cos = cos[:, :, : x.shape[-2], :] sin = sin[:, :, : x.shape[-2], :] return (x * cos) + (rotate_half(x) * sin) def gelu(x): """ This is the gelu implementation from the original ESM repo. Using F.gelu yields subtly wrong results. """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def symmetrize(x): "Make layer symmetric in final two dimensions, used for contact prediction." return x + x.transpose(-1, -2) def average_product_correct(x): "Perform average product correct, used for contact prediction." a1 = x.sum(-1, keepdims=True) a2 = x.sum(-2, keepdims=True) a12 = x.sum((-1, -2), keepdims=True) avg = a1 * a2 avg.div_(a12) # in-place to reduce memory normalized = x - avg return normalized class RotaryEmbedding(torch.nn.Module): """ Rotary position embeddings based on those in [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation matrices which depend on their relative positions. 
""" inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int): super().__init__() # Generate and save the inverse frequency buffer (non trainable) inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) inv_freq = inv_freq self.register_buffer("inv_freq", inv_freq) self._seq_len_cached = None self._cos_cached = None self._sin_cached = None def _update_cos_sin_tables(self, x, seq_dimension=2): seq_len = x.shape[seq_dimension] # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: self._seq_len_cached = seq_len t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self._cos_cached = emb.cos()[None, None, :, :] self._sin_cached = emb.sin()[None, None, :, :] return self._cos_cached, self._sin_cached def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2) return ( apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype), apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype), ) class EsmContactPredictionHead(nn.Module): """Performs symmetrization, apc, and computes a logistic regression on the output features""" def __init__( self, in_features: int, bias=True, eos_idx: int = 2, ): super().__init__() self.in_features = in_features self.eos_idx = eos_idx self.regression = nn.Linear(in_features, 1, bias) self.activation = nn.Sigmoid() def forward(self, tokens, attentions): # remove eos token attentions eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1] # remove cls token attentions attentions = attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _ = attentions.size() attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) # features: batch x channels x tokens x tokens (symmetric) attentions = attentions.to( self.regression.weight.device ) # attentions always float32, may need to convert to float16 attentions = average_product_correct(symmetrize(attentions)) attentions = attentions.permute(0, 2, 3, 1) return self.activation(self.regression(attentions).squeeze(3)) class EsmEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) if config.emb_layer_norm_before: self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) else: self.layer_norm = None self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.padding_idx = config.pad_token_id if self.position_embedding_type == "absolute": self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.token_dropout = config.token_dropout self.mask_token_id = config.mask_token_id def forward( self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an # embedding_scale factor here. embeddings = inputs_embeds # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout # flag is False then it is handled in the same was as BERT/RoBERTa. If it is set to True, however, # masked tokens are treated as if they were selected for input dropout and zeroed out. # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample). # This is analogous to the way that dropout layers scale down outputs during evaluation when not # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training). if self.token_dropout and input_ids is not None: embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0) mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs src_lengths = attention_mask.sum(-1) mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to( embeddings.dtype ) if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if self.layer_norm is not None: embeddings = self.layer_norm(embeddings) if attention_mask is not None: embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype) # Matt: I think this line was copied incorrectly from BERT, disabling it for now. # embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class EsmSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) self.rotary_embeddings = None if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) elif self.position_embedding_type == "rotary": self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size) self.is_decoder = config.is_decoder self.layer_idx = layer_idx def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: hidden_shape = (hidden_states.shape[0], -1, self.num_attention_heads, self.attention_head_size) query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.key(encoder_hidden_states).view(hidden_shape).transpose(1, 2) value_layer = self.value(encoder_hidden_states).view(hidden_shape).transpose(1, 2) attention_mask = encoder_attention_mask else: key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2) # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim). # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent, # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original # ESM code and fix rotary embeddings. query_layer = query_layer * self.attention_head_size**-0.5 if self.position_embedding_type == "rotary": query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in EsmModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs.to(value_layer.dtype), value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (None,) return outputs class EsmSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states class EsmFlashAttention2(EsmSelfAttention): """ ESM flash attention module. This module inherits from `EsmSelfAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, config, position_embedding_type=None, layer_idx=None): super().__init__(config, position_embedding_type=position_embedding_type, layer_idx=layer_idx) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. 
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() self.dropout_prob = config.attention_probs_dropout_prob def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: # Flash attention doesn't support output_attentions or cross attention if output_attentions or head_mask is not None or encoder_hidden_states is not None: logger.warning_once( "EsmFlashAttention2 does not support output_attentions, head_mask, or cross_attention. " "Falling back to the manual attention implementation. This warning can be removed using " 'the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions, ) bsz, q_len, _ = hidden_states.size() hidden_shape = (hidden_states.shape[0], -1, self.num_attention_heads, self.attention_head_size) query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2) key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2) value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2) # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. input_dtype = query_layer.dtype device_type = query_layer.device.type if query_layer.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.query.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_layer = query_layer.to(target_dtype) key_layer = key_layer.to(target_dtype) value_layer = value_layer.to(target_dtype) # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim). # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent, # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original # ESM code and fix rotary embeddings. 
query_layer = query_layer * self.attention_head_size**-0.5 if self.position_embedding_type == "rotary": query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) elif self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": raise ValueError(f"ESM flash attention does not support {self.position_embedding_type} embeddings") # It would likely be faster to change self.transpose_for_scores to output the correct # dimensions for flash_attention_2, but that would also mean changing the rotary embedding # functions. Here we just permute the dimensions to match the expected input. attn_output = _flash_attention_forward( query_layer.permute(0, 2, 1, 3), key_layer.permute(0, 2, 1, 3), value_layer.permute(0, 2, 1, 3), attention_mask, query_length=q_len, is_causal=self.is_decoder, softmax_scale=1.0, dropout=self.dropout_prob if self.training else 0.0, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, -1) outputs = (attn_output, None) if self.is_decoder: outputs = outputs + (None,) return outputs ESM_ATTENTION_CLASSES = { "eager": EsmSelfAttention, "flash_attention_2": EsmFlashAttention2, } class EsmAttention(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() self.self = ESM_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.output = EsmSelfOutput(config) self.pruned_heads = set() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, cache_position=None, ): hidden_states_ln = self.LayerNorm(hidden_states) self_outputs = self.self( hidden_states_ln, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class EsmIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = gelu(hidden_states) return hidden_states class EsmOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + 
input_tensor return hidden_states class EsmLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = EsmAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = EsmAttention(config) self.intermediate = EsmIntermediate(config) self.output = EsmOutput(config) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, cache_position=None, ): self_attention_outputs = self.attention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise AttributeError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated" " with cross-attention layers by setting `config.add_cross_attention=True`" ) cross_attention_outputs = self.crossattention( attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights layer_output = self.feed_forward_chunk(attention_output) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (None,) return outputs def feed_forward_chunk(self, attention_output): attention_output_ln = self.LayerNorm(attention_output) intermediate_output = self.intermediate(attention_output_ln) layer_output = self.output(intermediate_output, attention_output) return layer_output class EsmEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)]) self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False @can_return_tuple def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, cache_position=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, 
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if self.emb_layer_norm_after:
            hidden_states = self.emb_layer_norm_after(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        return BaseModelOutputWithCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler
class EsmPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@auto_docstring
class EsmPreTrainedModel(PreTrainedModel):
    config: EsmConfig
    base_model_prefix = "esm"
    supports_gradient_checkpointing = True
    _no_split_modules = ["EsmLayer", "EsmFoldTriangularSelfAttentionBlock", "EsmEmbeddings"]
    _keys_to_ignore_on_load_unexpected = ["position_embeddings.weight"]
    _supports_flash_attn = True

    # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with BertLMPredictionHead->EsmLMHead
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, EsmLMHead):
            module.bias.data.zero_()

    def get_output_embeddings(self):
        # NOTE: get_output_embeddings() must return None to prevent accidental weight tying.
        # See e.g. https://github.com/huggingface/transformers/pull/39339#discussion_r2219126400
        return None


@auto_docstring
class EsmModel(EsmPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob
    Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
""" def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = EsmEmbeddings(config) self.encoder = EsmEncoder(config) self.pooler = EsmPooler(config) if add_pooling_layer else None self.contact_head = EsmContactPredictionHead( in_features=config.num_hidden_layers * config.num_attention_heads, bias=True ) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" input_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) position_ids (`torch.LongTensor` of shape `((batch_size, sequence_length))`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `((batch_size, sequence_length), hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if self.config._attn_implementation == "flash_attention_2": extended_attention_mask = attention_mask else: # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def predict_contacts(self, tokens, attention_mask): attns = self(tokens, attention_mask=attention_mask, return_dict=True, output_attentions=True).attentions attns = torch.stack(attns, dim=1) # Matches the original model layout # In the original model, attentions for padding tokens are completely zeroed out. 
# This makes no difference most of the time because the other tokens won't attend to them, # but it does for the contact prediction task, which takes attentions as input, # so we have to mimic that here. attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(3) attns *= attention_mask.unsqueeze(1).unsqueeze(2).unsqueeze(4) return self.contact_head(tokens, attns) @auto_docstring class EsmForMaskedLM(EsmPreTrainedModel): _tied_weights_keys = ["lm_head.decoder.weight"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.esm = EsmModel(config, add_pooling_layer=False) self.lm_head = EsmLMHead(config) self.init_weights() self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() labels = labels.to(prediction_scores.device) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def predict_contacts(self, tokens, attention_mask): return self.esm.predict_contacts(tokens, attention_mask=attention_mask) class EsmLMHead(nn.Module): """ESM Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) + self.bias return x @auto_docstring( custom_intro=""" ESM Model transformer with a 
sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) class EsmForSequenceClassification(EsmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.esm = EsmModel(config, add_pooling_layer=False) self.classifier = EsmClassificationHead(config) self.init_weights() self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.esm( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class EsmForTokenClassification(EsmPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.esm = EsmModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape 
`(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.esm(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            labels = labels.to(logits.device)
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class EsmClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.tanh(x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: torch.Tensor
        padding_idx: int

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


__all__ = [
    "EsmForMaskedLM",
    "EsmForSequenceClassification",
    "EsmForTokenClassification",
    "EsmModel",
    "EsmPreTrainedModel",
]
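A brief aside on the `RotaryEmbedding` module defined earlier in this file: the sketch below (toy dimensions, not tied to any ESM checkpoint) rebuilds the cos/sin tables and the `rotate_half` mixing to show that each position's query vector is rotated pairwise, which leaves its norm unchanged.

```python
# Minimal sketch of the rotary position embedding math, using toy sizes.
import torch

def rotate_half(x):
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

dim, seq_len = 8, 4
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
t = torch.arange(seq_len).float()
freqs = torch.outer(t, inv_freq)
emb = torch.cat((freqs, freqs), dim=-1)            # (seq_len, dim)
cos, sin = emb.cos()[None, None], emb.sin()[None, None]

q = torch.randn(1, 1, seq_len, dim)                # (batch, heads, seq_len, head_dim)
q_rot = (q * cos) + (rotate_half(q) * sin)

# Pairwise rotation preserves each position's vector norm.
print(torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5))  # True
```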
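Similarly, a tiny worked example of `create_position_ids_from_input_ids`: padded positions keep `padding_idx` while real tokens are numbered from `padding_idx + 1`. The padding id of 1 below is only an assumption for illustration.

```python
# Hypothetical inputs; 1 stands in for the padding token id.
import torch

input_ids = torch.tensor([[5, 6, 7, 1, 1]])
padding_idx = 1

mask = input_ids.ne(padding_idx).int()                        # [[1, 1, 1, 0, 0]]
incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask  # [[1, 2, 3, 0, 0]]
position_ids = incremental.long() + padding_idx
print(position_ids)  # tensor([[2, 3, 4, 1, 1]])
```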
transformers/src/transformers/models/esm/modeling_esm.py/0
{ "file_path": "transformers/src/transformers/models/esm/modeling_esm.py", "repo_id": "transformers", "token_count": 22440 }
492
# coding=utf-8 # Copyright 2025 Westlake Representational Learning Lab (Fajie Yuan Lab) team and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import Tensor, nn from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_outputs import ( BaseModelOutputWithPast, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, ModelOutput, ) from ...modeling_utils import ModuleUtilsMixin, PreTrainedModel, get_parameter_dtype from ...utils import ( auto_docstring, can_return_tuple, logging, ) from ...utils.deprecation import deprecate_kwarg from ...utils.generic import check_model_inputs from ..esm.modeling_esm import ( EsmAttention, EsmEmbeddings, EsmEncoder, EsmIntermediate, EsmLayer, EsmOutput, EsmPooler, EsmSelfAttention, EsmSelfOutput, ) from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaMLP, LlamaPreTrainedModel, LlamaRMSNorm, LlamaRotaryEmbedding, ) from .configuration_evolla import EvollaConfig, SaProtConfig logger = logging.get_logger(__name__) class EvollaSaProtEmbeddings(EsmEmbeddings): def __init__(self, config): super().__init__() # remove the position_ids in EsmEmbeddings self.position_ids = None def rotate_half_esm(x): x1, x2 = x.chunk(2, dim=-1) return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb_esm(x, cos, sin): cos = cos[:, :, : x.shape[-2], :] sin = sin[:, :, : x.shape[-2], :] return (x * cos) + (rotate_half_esm(x) * sin) class EvollaSaProtRotaryEmbedding(nn.Module): """ Rotary position embeddings based on those in [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation matrices which depend on their relative positions. 
""" inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim: int): super().__init__() # Generate and save the inverse frequency buffer (non trainable) inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) inv_freq = inv_freq self.register_buffer("inv_freq", inv_freq) self._seq_len_cached = None self._cos_cached = None self._sin_cached = None def _update_cos_sin_tables(self, x, seq_dimension=2): seq_len = x.shape[seq_dimension] # Reset the tables if the sequence length has changed, # or if we're on a new device (possibly due to tracing for instance) if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: self._seq_len_cached = seq_len t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self._cos_cached = emb.cos()[None, None, :, :] self._sin_cached = emb.sin()[None, None, :, :] return self._cos_cached, self._sin_cached def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2) return ( apply_rotary_pos_emb_esm(q, self._cos_cached, self._sin_cached), apply_rotary_pos_emb_esm(k, self._cos_cached, self._sin_cached), ) class EvollaSaProtSelfAttention(EsmSelfAttention, nn.Module): def __init__(self, config, position_embedding_type=None, layer_idx=None): nn.Module.__init__(self) self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) self.rotary_embeddings = None if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) elif self.position_embedding_type == "rotary": self.rotary_embeddings = EvollaSaProtRotaryEmbedding(dim=self.attention_head_size) self.is_decoder = config.is_decoder self.layer_idx = layer_idx class EvollaSaProtSelfOutput(EsmSelfOutput): pass class EvollaSaProtAttention(EsmAttention): pass class EvollaSaProtIntermediate(EsmIntermediate): pass class EvollaSaProtOutput(EsmOutput): pass class EvollaSaProtLayer(EsmLayer): pass class EvollaSaProtEncoder(EsmEncoder): pass class EvollaSaProtPooler(EsmPooler): pass @auto_docstring class EvollaSaProtPreTrainedModel(PreTrainedModel): config: SaProtConfig _no_split_modules = ["EvollaSaProtLayer"] def _init_weights(self, module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, 
nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class EvollaSaProtProteinEncoder(EvollaSaProtPreTrainedModel):
    def __init__(self, config: SaProtConfig):
        super().__init__(config)
        self.embeddings = EvollaSaProtEmbeddings(config)
        self.encoder = EvollaSaProtEncoder(config)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
    ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        input_shape = input_ids.size()
        batch_size, seq_length = input_shape
        device = input_ids.device

        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length)), device=device)

        inputs_embeds = self.embeddings(input_ids=input_ids, attention_mask=attention_mask)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        encoder_outputs = self.encoder(inputs_embeds, attention_mask=extended_attention_mask)
        sequence_output = encoder_outputs[0]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    def get_extended_attention_mask(
        self, attention_mask: Tensor, input_shape: tuple[int], device: torch.device = None, dtype: torch.float = None
    ) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`Tuple[int]`):
                The shape of the input to the model.

        Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
        """
        if dtype is None:
            dtype = get_parameter_dtype(self)

        if not (attention_mask.dim() == 2 and self.config.is_decoder):
            # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder`
            if device is not None:
                warnings.warn(
                    "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
                )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder( input_shape, attention_mask, device ) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})" ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min return extended_attention_mask class EvollaSequenceCompressorAttention(nn.Module): def __init__(self, dim, dim_head=64, heads=8): super().__init__() self.scale = dim_head**-0.5 self.heads = heads inner_dim = dim_head * heads self.norm_media = nn.LayerNorm(dim) self.norm_latents = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, dim, bias=False) def forward(self, x, latents, mask): """ Args: x (torch.Tensor): image features shape (b, n1, D) latent (torch.Tensor): latent features shape (b, n2, D); n2: num of latent tokens """ x = self.norm_media(x) latents = self.norm_latents(latents) h = self.heads q = self.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) k, v = self.to_kv(kv_input).chunk( 2, dim=-1 ) # each: batch_size, max_protein_length+num_latents, dim_head*num_heads q = q.view(q.size(0), q.size(1), h, -1).permute(0, 2, 1, 3) k = k.view(k.size(0), k.size(1), h, -1).permute(0, 2, 1, 3) v = v.view(v.size(0), v.size(1), h, -1).permute(0, 2, 1, 3) q = q * self.scale # batch_size, num_heads, num_latents, dim_head # attention sim = torch.matmul(q, k.transpose(-1, -2)) sim = sim - sim.amax(dim=-1, keepdim=True).detach() bs, nh, skd, okd = sim.shape ones = torch.ones(nh, skd).to(mask.device) # Create a tensor of ones with shape (nh, skd) mask_exp = mask[:, None, None, :] ones_exp = ones[None, :, :, None] mask = mask_exp * ones_exp sim = sim.masked_fill((1 - mask).bool(), -1e4) attn = sim.softmax(dim=-1) out = torch.matmul(attn, v) out = out.permute(0, 2, 1, 3) # [batch, seq, head, features] -> [batch, seq, head*features] out = out.reshape(out.size(0), out.size(1), -1) return self.to_out(out) class EvollaFeedForward(nn.Module): def __init__(self, dim, mult=4): super().__init__() inner_dim = int(dim * mult) self.norm = nn.LayerNorm(dim) self.fc1 = nn.Linear(dim, inner_dim, bias=False) self.activation = nn.GELU() self.fc2 = nn.Linear(inner_dim, dim, bias=False) def forward(self, x): return self.fc2(self.activation(self.fc1(self.norm(x)))) class EvollaSequenceCompressorResampler(nn.Module): def __init__(self, config: EvollaConfig): super().__init__() protein_repr_dim = config.protein_encoder_config.hidden_size self.num_latents = config.resampler_num_latents 
self.latents = nn.Parameter(torch.randn(self.num_latents, protein_repr_dim), requires_grad=True) self.layers = nn.ModuleList([]) for _ in range(config.resampler_depth): self.layers.append( nn.ModuleList( [ EvollaSequenceCompressorAttention( dim=protein_repr_dim, dim_head=config.resampler_dim_head, heads=config.resampler_heads ), EvollaFeedForward(dim=protein_repr_dim, mult=config.resampler_ff_mult), ] ) ) self.norm = nn.LayerNorm(config.hidden_size) self.protein_projector = nn.Linear(protein_repr_dim, config.hidden_size) def forward(self, embeds, mask): b = embeds.shape[0] bs, _ = mask.shape # bs, max_protein_length latent_mask = torch.ones(bs, self.num_latents).to(mask.device) mask = torch.cat((mask, latent_mask), dim=1) # bs, max_protein_length + num_latents # blocks ones = torch.ones(b).to(self.latents.device) latents = self.latents[None] * ones.view(-1, 1, 1) # [b,n,d] latents = latents.to(embeds.dtype) for attn, ff in self.layers: latents = attn(embeds, latents, mask) + latents latents = ff(latents) + latents transformed_feature = self.protein_projector(latents) return self.norm(transformed_feature) @dataclass @auto_docstring class EvollaProteinEncoderModelOutput(ModelOutput): sequence_compressor_output: torch.FloatTensor = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None class EvollaProteinEncoder(nn.Module): def __init__(self, config: EvollaConfig): super().__init__() self.model = EvollaSaProtProteinEncoder(config=config.protein_encoder_config) self.sequence_compressor_resampler = EvollaSequenceCompressorResampler(config=config) @can_return_tuple def forward(self, input_ids: torch.LongTensor, attention_mask: torch.FloatTensor, **kwargs): protein_output = self.model(input_ids=input_ids, attention_mask=attention_mask) protein_embeds = protein_output.last_hidden_state sequence_repr = self.sequence_compressor_resampler(protein_embeds, attention_mask) return EvollaProteinEncoderModelOutput( sequence_compressor_output=sequence_repr, last_hidden_state=protein_output.last_hidden_state, ) class EvollaSequenceAlignerCrossAttention(nn.Module): def __init__( self, config, protein_encoder_dim: Optional[int] = None, structure_encoder_dim: Optional[int] = None, msa_encoder_dim: Optional[int] = None, ): super().__init__() self.hidden_size = config.hidden_size self.num_attention_heads = config.num_attention_heads self.scale = self.num_attention_heads**-0.5 self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size attention_probs_dropout_prob = config.aligner_attention_probs_dropout_prob enable_bias = config.aligner_enable_bias ffn_mult = config.aligner_ffn_mult self.query = nn.Linear(self.hidden_size, self.all_head_size) if protein_encoder_dim is not None: self.key_protein = nn.Linear(protein_encoder_dim, self.all_head_size) self.value_protein = nn.Linear(protein_encoder_dim, self.all_head_size) else: self.key_protein = None self.value_protein = None if structure_encoder_dim is not None: self.key_structure = nn.Linear(structure_encoder_dim, self.all_head_size) self.value_structure = nn.Linear(structure_encoder_dim, self.all_head_size) else: self.key_structure = None self.value_structure = None if msa_encoder_dim is not None: self.key_msa = nn.Linear(msa_encoder_dim, self.all_head_size) self.value_msa = nn.Linear(msa_encoder_dim, self.all_head_size) else: self.key_msa = None 
self.value_msa = None self.attention_norm = EvollaRMSNorm(self.hidden_size) self.dropout = nn.Dropout(attention_probs_dropout_prob) self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=enable_bias) self.ff = EvollaFeedForward(self.hidden_size, ffn_mult) self.gate_attention = nn.Parameter(torch.tensor([0.0])) self.gate_ffw = nn.Parameter(torch.tensor([0.0])) def cross_attention( self, query_states, protein_key_value_states, structure_key_value_states, msa_key_value_states, query_attn_mask, protein_kv_attn_mask, structure_kv_attn_mask, msa_kv_attn_mask, ): """ query_states: text key_value_states: protein query_states: [bs, query_seq_len, dim] key_value_states: [bs, kv_seq_len, dim] query_attn_mask: [bs, query_seq_len] kv_attn_mask: [bs, kv_seq_len] """ # Concatenate protein and structure kv_attn_mask = [protein_kv_attn_mask, structure_kv_attn_mask, msa_kv_attn_mask] kv_attn_mask = [_ for _ in kv_attn_mask if _ is not None] if not kv_attn_mask: raise ValueError("At least one modality should be provided for cross attention.") kv_attn_mask = torch.cat(kv_attn_mask, dim=1) query_layer = self.attention_norm(query_states) # Warning: This place might cause issues, refers to # https://discuss.pytorch.org/t/cuda-error-cublas-status-not-supported-when-calling-cublasltmatmul-from-torch-nn-functional-linear/170214/13 # Solution: add `DISABLE_ADDMM_CUDA_LT=1` as environment variable # Apply linear transformation to input_query, input_key, and input_value query_layer = self.query(query_layer) # [bs, querylength, dim] if self.key_protein is not None and self.value_protein is not None: protein_key_value_states = protein_key_value_states.to(query_states) key_layer_protein = self.key_protein(protein_key_value_states) # [bs, keylength, dim] value_layer_protein = self.value_protein(protein_key_value_states) # [bs, keylength, dim] else: key_layer_protein = None value_layer_protein = None if self.key_structure is not None and self.value_structure is not None: structure_key_value_states = structure_key_value_states.to(query_states) key_layer_structure = self.key_structure(structure_key_value_states) # [bs, keylength, dim] value_layer_structure = self.value_structure(structure_key_value_states) # [bs, keylength, dim] else: key_layer_structure = None value_layer_structure = None if self.key_msa is not None and self.value_msa is not None: msa_key_value_states = msa_key_value_states.to(query_states) key_layer_msa = self.key_msa(msa_key_value_states) # [bs, keylength, dim] value_layer_msa = self.value_msa(msa_key_value_states) # [bs, keylength, dim] else: key_layer_msa = None value_layer_msa = None key_layer = [key_layer_protein, key_layer_structure, key_layer_msa] key_layer = [_ for _ in key_layer if _ is not None] key_layer = torch.cat(key_layer, dim=1) value_layer = [value_layer_protein, value_layer_structure, value_layer_msa] value_layer = [_ for _ in value_layer if _ is not None] value_layer = torch.cat(value_layer, dim=1) new_query_layer_shape = query_layer.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) query_layer = query_layer.view(*new_query_layer_shape).permute(0, 2, 1, 3) new_key_layer_shape = key_layer.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) key_layer = key_layer.view(*new_key_layer_shape).permute(0, 2, 1, 3) new_value_layer_shape = value_layer.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) value_layer = value_layer.view(*new_value_layer_shape).permute(0, 2, 1, 3) query_layer = query_layer * self.scale # 
attention_mask: [bs, 1, querylength, keylength] if query_attn_mask is None: query_attn_mask = torch.ones(query_states.size(0), query_states.size(1)).to(query_states.device) attention_mask = query_attn_mask[:, None, :, None] * kv_attn_mask[:, None, None, :] # Compute the scaled dot-product attention scores attn_weights = torch.matmul(query_layer, key_layer.transpose(-1, -2)) # [bs, numheads, querylength, keylength] attn_weights = attn_weights - attn_weights.amax(dim=-1, keepdim=True).detach() # To stablize score attention_scores = attn_weights.masked_fill( (1 - attention_mask).bool(), torch.finfo(attn_weights.dtype).min ) # [bs, numheads, querylength, keylength] attention_probs = nn.Softmax(dim=-1)(attention_scores) # attention_probs_dropped = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) # [bs, numheads, querylength, dim/numheads] context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) context_layer = self.out_proj(context_layer) return context_layer @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, query_states, protein_kv_states, structure_kv_states, msa_kv_states, query_attn_mask, protein_kv_attn_mask=None, structure_kv_attn_mask=None, msa_kv_attn_mask=None, protein_batch_mask=None, structure_batch_mask=None, msa_batch_mask=None, past_key_values=None, ): if protein_kv_states is not None: bs, protein_kv_seq_len, dim = protein_kv_states.shape if protein_kv_attn_mask is None: protein_kv_attn_mask = ( torch.ones(bs, protein_kv_seq_len).to(protein_batch_mask.device) * protein_batch_mask.expand(size=(protein_kv_seq_len, bs)).T ).to(protein_kv_states.device) else: protein_kv_attn_mask = None if structure_kv_states is not None: bs, structure_kv_seq_len, dim = structure_kv_states.shape if structure_kv_attn_mask is None: structure_kv_attn_mask = ( torch.ones(bs, structure_kv_seq_len).to(protein_batch_mask.device) * structure_batch_mask.expand(size=(structure_kv_seq_len, bs)).T ).to(structure_kv_states.device) else: structure_kv_attn_mask = None if msa_kv_states is not None: bs, msa_kv_seq_len, dim = msa_kv_states.shape if msa_kv_attn_mask is None: msa_kv_attn_mask = ( torch.ones(bs, msa_kv_seq_len).to(protein_batch_mask.device) * msa_batch_mask.expand(size=(msa_kv_seq_len, bs)).T ).to(msa_kv_states.device) else: msa_kv_attn_mask = None hidden_states = query_states # only when there's at least one valid modality, crossattention will be performed if ( (protein_kv_states is not None and protein_kv_attn_mask.any()) or (structure_kv_states is not None and structure_kv_attn_mask.any()) or (msa_kv_states is not None and msa_kv_attn_mask.any()) ): residual = hidden_states hidden_states = self.cross_attention( query_states=hidden_states, protein_key_value_states=protein_kv_states, structure_key_value_states=structure_kv_states, msa_key_value_states=msa_kv_states, query_attn_mask=query_attn_mask, protein_kv_attn_mask=protein_kv_attn_mask, structure_kv_attn_mask=structure_kv_attn_mask, msa_kv_attn_mask=msa_kv_attn_mask, ) # [bs, query_seq_len, dim] # tanh gate hidden_states = torch.tanh(self.gate_attention) * hidden_states hidden_states = residual + hidden_states # input_query residual = hidden_states hidden_states = self.ff(hidden_states) * torch.tanh(self.gate_ffw) hidden_states = residual + hidden_states return hidden_states class EvollaRMSNorm(LlamaRMSNorm): pass class 
EvollaRotaryEmbedding(LlamaRotaryEmbedding): pass class EvollaMLP(LlamaMLP): pass class EvollaAttention(LlamaAttention): pass class EvollaDecoderLayer(LlamaDecoderLayer): def __init__(self, config: EvollaConfig, layer_idx: int): super().__init__(config, layer_idx) if (layer_idx + 1) % max(config.num_hidden_layers // config.aligner_num_add_layers, 1) == 0: self.adapter = EvollaSequenceAlignerCrossAttention( config, protein_encoder_dim=config.hidden_size, ) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, protein_kv_states: Optional[torch.Tensor] = None, structure_kv_states: Optional[torch.Tensor] = None, msa_kv_states: Optional[torch.Tensor] = None, protein_batch_mask: Optional[torch.Tensor] = None, structure_batch_mask: Optional[torch.Tensor] = None, msa_batch_mask: Optional[torch.Tensor] = None, query_attn_mask: Optional[torch.Tensor] = None, **kwargs, ): residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states if hasattr(self, "adapter"): hidden_states = self.adapter( query_states=hidden_states, protein_kv_states=protein_kv_states, structure_kv_states=structure_kv_states, msa_kv_states=msa_kv_states, query_attn_mask=query_attn_mask, protein_batch_mask=protein_batch_mask, structure_batch_mask=structure_batch_mask, msa_batch_mask=msa_batch_mask, ) return hidden_states class EvollaPreTrainedModel(LlamaPreTrainedModel): _supports_flash_attn = False # see dependency on `EvollaSaProtProteinEncoder` _supports_flex_attn = False # see dependency on `EvollaSaProtProteinEncoder` _supports_attention_backend = False _no_split_modules = [ "EvollaDecoderLayer", "EvollaSequenceCompressorResampler", "EvollaSequenceAlignerCrossAttention", ] def _init_weights(self, module): std = self.config.initializer_range LlamaPreTrainedModel._init_weights(self, module) if isinstance(module, EvollaSequenceAlignerCrossAttention): module.gate_attention.zero_() module.gate_ffw.zero_() module.attention_norm.weight.data.fill_(1.0) elif isinstance(module, EvollaSequenceCompressorResampler): module.latents.data.normal_(mean=0.0, std=std) class EvollaModel(EvollaPreTrainedModel): def __init__(self, config: EvollaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size, self.padding_idx) self.protein_encoder = EvollaProteinEncoder(config=config) self.layers = nn.ModuleList( [ EvollaDecoderLayer( config=config, layer_idx=layer_idx, ) for layer_idx in range(config.num_hidden_layers) ] ) self.norm = EvollaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = EvollaRotaryEmbedding(config=config) self.gradient_checkpointing = 
getattr(config, "gradient_checkpointing", False) self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value @auto_docstring @check_model_inputs def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, protein_input_ids: Optional[torch.LongTensor] = None, protein_attention_mask: Optional[torch.Tensor] = None, structure_feats: Optional[torch.FloatTensor] = None, msa_feats: Optional[torch.FloatTensor] = None, structure_batch_mask: Optional[torch.Tensor] = None, msa_batch_mask: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPast]: r""" protein_input_ids (torch.LongTensor): The input IDs for the protein sequence in structure-aware tokens. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`. protein_attention_mask (torch.Tensor): The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`. structure_feats (torch.FloatTensor): The input IDs for purely structure-based features. Should be of shape `(batch_size, structure_seq_length, structure_feat_dim)` and type `torch.FloatTensor`. Dummy input for now. msa_feats (torch.FloatTensor): The input IDs for purely MSA-based features. Should be of shape `(batch_size, msa_seq_length, msa_feat_dim)` and type `torch.FloatTensor`. Dummy input for now. structure_batch_mask (torch.Tensor): The batch mask to decide which protein sequences are purely structure-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `structure_feats`. Dummpy input for now. msa_batch_mask (torch.Tensor): The batch mask to decide which protein sequences are purely MSA-based. Should be of shape `(batch_size)` and type `torch.Tensor`. Should be paired with `msa_feats`. Dummpy input for now. 
""" if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) protein_feats = None protein_batch_mask = None # If provided, actually compute them if protein_input_ids is not None and protein_attention_mask is not None: protein_outputs = self.protein_encoder( input_ids=protein_input_ids, attention_mask=protein_attention_mask, ) protein_feats = protein_outputs.sequence_compressor_output protein_batch_mask = torch.tensor([True] * protein_input_ids.shape[0], device=protein_input_ids.device) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, protein_kv_states=protein_feats, structure_kv_states=structure_feats, msa_kv_states=msa_feats, protein_batch_mask=protein_batch_mask, structure_batch_mask=structure_batch_mask, msa_batch_mask=msa_batch_mask, query_attn_mask=attention_mask, **kwargs, ) hidden_states = self.norm(hidden_states) output = BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, ) return output class EvollaForProteinText2Text(EvollaPreTrainedModel, GenerationMixin): def __init__(self, config): super().__init__(config) self.model = EvollaModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, self.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): return self.model.set_input_embeddings(value) @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, # text input ids attention_mask: Optional[torch.Tensor] = None, # text attention mask inputs_embeds: Optional[torch.FloatTensor] = None, # text input embeddings labels: Optional[torch.LongTensor] = None, protein_input_ids: torch.LongTensor = None, protein_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, **kwargs, ): r""" protein_input_ids (torch.LongTensor): The input IDs for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.LongTensor`. protein_attention_mask (torch.Tensor): The attention mask for the protein sequence. Should be of shape `(batch_size, protein_seq_length)` and type `torch.Tensor`. 
Example: ```python >>> from transformers import EvollaProcessor, EvollaForProteinText2Text >>> model = EvollaForProteinText2Text.from_pretrained("westlake/Evolla-10B-hf") >>> processor = EvollaProcessor.from_pretrained("westlake/Evolla-10B-hf") >>> protein_information = { "aa_seq": "your amino acid sequence", "foldseek": "your foldseek sequence", } >>> question = "What is the function of this protein?" >>> message = [ {"role": "system", "content": "You are an AI expert that can answer any questions about protein."}, {"role": "user", "content": question}, ] >>> inputs = processor(proteins=[protein_information], messages_list=[message], return_tensors="pt", padding="longest") >>> outputs = model.generate(**inputs) >>> print(processor.batch_decode(outputs, skip_special_tokens=True)) ```""" outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, protein_input_ids=protein_input_ids, protein_attention_mask=protein_attention_mask, use_cache=use_cache, **kwargs, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.vocab_size, **kwargs) lm_outputs = CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return lm_outputs __all__ = ["EvollaForProteinText2Text", "EvollaModel", "EvollaPreTrainedModel"]
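The cross-attention adapter defined above (`EvollaSequenceAlignerCrossAttention`) multiplies each residual update by `tanh(gate)`, with `gate_attention` and `gate_ffw` initialized to zero. The sketch below is not the Evolla module itself — the class name and hidden size are placeholders chosen for illustration — it only demonstrates why a zero-initialized tanh gate makes the adapter an exact identity mapping before training.

```python
import torch
from torch import nn


class GatedResidualAdapter(nn.Module):
    """Minimal stand-in for the tanh-gated residual pattern used above."""

    def __init__(self, hidden_size: int = 16):
        super().__init__()
        # Stand-in for the cross-attention / feed-forward sub-layer.
        self.sub_layer = nn.Linear(hidden_size, hidden_size)
        # Gate initialized to 0.0, exactly as in the Evolla adapter.
        self.gate = nn.Parameter(torch.tensor([0.0]))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        update = self.sub_layer(hidden_states)
        # tanh(0) == 0, so the update contributes nothing at initialization.
        return residual + torch.tanh(self.gate) * update


x = torch.randn(2, 5, 16)
adapter = GatedResidualAdapter()
assert torch.allclose(adapter(x), x)  # identity mapping before any training
```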
transformers/src/transformers/models/evolla/modular_evolla.py/0
{ "file_path": "transformers/src/transformers/models/evolla/modular_evolla.py", "repo_id": "transformers", "token_count": 18304 }
493
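A standalone illustration of the mask arithmetic in `EvollaSaProtProteinEncoder.get_extended_attention_mask` from the file above (encoder path only, with an arbitrary toy padding mask): a `[batch, seq_len]` mask of ones and zeros becomes a broadcastable additive `[batch, 1, 1, seq_len]` tensor that is `0` where attention is allowed and the dtype minimum where it is masked.

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 1 = attend, 0 = padding
dtype = torch.float32

# Same two steps as in the encoder path above.
extended = attention_mask[:, None, None, :].to(dtype)   # shape [2, 1, 1, 4]
extended = (1.0 - extended) * torch.finfo(dtype).min    # 0.0 for kept tokens, ~-3.4e38 for padding

print(extended.shape)      # torch.Size([2, 1, 1, 4])
print(extended[0, 0, 0])   # tensor([ 0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38])
```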
# coding=utf-8 # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """FLAVA model configurations""" from typing import Any, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class FlavaImageConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. mask_token (`bool`, *optional*, defaults to `True`): Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA. vocab_size (`int`, *optional*, defaults to 8192): Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked Image Modeling) loss for FLAVA. 
Example: ```python >>> from transformers import FlavaImageConfig, FlavaImageModel >>> # Initializing a FlavaImageModel with style configuration >>> configuration = FlavaImageConfig() >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration >>> model = FlavaImageModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_image_model" base_config_key = "image_config" def __init__( self, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: int = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, patch_size: int = 16, num_channels: int = 3, qkv_bias: bool = True, mask_token: bool = True, vocab_size: int = 8192, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size class FlavaTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`FlavaTextModel`]. type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is used similar to RoBERTa. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658). 
hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. Example: ```python >>> from transformers import FlavaTextConfig, FlavaTextModel >>> # Initializing a FlavaTextModel with style configuration >>> configuration = FlavaTextConfig() >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration >>> model = FlavaTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_text_model" base_config_key = "text_config" def __init__( self, vocab_size: int = 30522, type_vocab_size: int = 2, max_position_embeddings: int = 512, position_embedding_type: str = "absolute", hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.0, attention_probs_dropout_prob: float = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, pad_token_id: int = 0, qkv_bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.max_position_embeddings = max_position_embeddings self.position_embedding_type = position_embedding_type self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.pad_token_id = pad_token_id class FlavaMultimodalConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 6): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_cls_token (`bool`, *optional*, defaults to `True`): Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model. Example: ```python >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel >>> # Initializing a FlavaMultimodalModel with style configuration >>> configuration = FlavaMultimodalConfig() >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration >>> model = FlavaMultimodalModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "flava_multimodal_model" base_config_key = "multimodal_config" def __init__( self, hidden_size: int = 768, num_hidden_layers: int = 6, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: int = "gelu", hidden_dropout_prob: int = 0.0, attention_probs_dropout_prob: int = 0.0, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, qkv_bias: bool = True, use_cls_token: bool = True, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_cls_token = use_cls_token class FlavaImageCodebookConfig(PretrainedConfig): model_type = "flava_image_codebook" base_config_key = "image_codebook_config" r""" [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. 
It is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: num_groups (`int`, *optional*, defaults to 4): Number of groups to be created. This parameter as of now doesn't affect the model and is used for some internal calculation and estimations. input_channels (`int`, *optional*, defaults to 3): Number of channels in the image to be passed. num_blocks_per_group (`int`, *optional*, defaults to 2): Number of conv-based blocks per group. hidden_size (`int`, *optional*, defaults to 256): Size of hidden dim for the blocks. vocab_size (`int`, *optional*, defaults to 8192): Size of the output vocabulary for the codebook. freeze (`bool`, defaults to `True`): Whether to freeze the weights of the model. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook >>> # Initializing a FlavaImageCodebook with style configuration >>> configuration = FlavaImageCodebookConfig() >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration >>> model = FlavaImageCodebook(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ def __init__( self, num_groups: int = 4, input_channels: int = 3, num_blocks_per_group: int = 2, hidden_size: int = 256, vocab_size: int = 8192, freeze: int = True, initializer_range: float = 0.02, **kwargs, ): super().__init__(**kwargs) self.num_groups = num_groups self.input_channels = input_channels self.num_blocks_per_group = num_blocks_per_group self.hidden_size = hidden_size self.vocab_size = vocab_size self.freeze = freeze self.initializer_range = initializer_range class FlavaConfig(PretrainedConfig): r""" [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaTextConfig`]. image_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaImageConfig`]. multimodal_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and image projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP implementation. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. ce_ignore_index (`int`, *optional*, defaults to -100): Cross entropy index to ignore. mim_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MIM (Masked Image Modeling) unimodal loss mlm_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MLM (Masked Language Modeling) unimodal loss global_contrastive_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to global contrastive cross-alignment loss. itm_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to image-text matching multimodal loss. mmm_image_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MMM loss's image part. mmm_text_weight (`float`, *optional*, defaults to 1.0): Weight to be assigned to MMM loss's text part. global_backprop_contrastive (`bool`, *optional*, defaults to `True`): Whether to use global backpropgation through all workers in contrastive loss. skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`): Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses. return_loss (`bool`, *optional*, defaults to `True`): Whether to return loss or not kwargs (*optional*): Dictionary of keyword arguments. Example: ```python >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining >>> # Initializing a FlavaConfig with style configuration >>> configuration = FlavaConfig() >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration >>> model = FlavaModel(configuration) >>> model_pre = FlavaForPreTraining(configuration) >>> # Accessing the model configuration >>> configuration = model.config >>> configuration_pre = model_pre.config ``` """ model_type = "flava" sub_configs = { "text_config": FlavaTextConfig, "image_config": FlavaImageConfig, "multimodal_config": FlavaMultimodalConfig, "image_codebook_config": FlavaImageCodebookConfig, } def __init__( self, image_config: Optional[dict[str, Any]] = None, text_config: Optional[dict[str, Any]] = None, multimodal_config: Optional[dict[str, Any]] = None, image_codebook_config: Optional[dict[str, Any]] = None, hidden_size: int = 768, layer_norm_eps: float = 1e-12, projection_dim: int = 768, init_codebook: bool = True, logit_scale_init_value: float = 2.6592, initializer_range: float = 0.02, ce_ignore_index: int = -100, mim_weight: float = 1.0, mlm_weight: float = 1.0, global_contrastive_weight: float = 1.0, itm_weight: float = 1.0, mmm_image_weight: float = 1.0, mmm_text_weight: float = 1.0, global_backprop_contrastive: bool = True, skip_unmasked_multimodal_encoder: bool = True, return_loss: bool = True, **kwargs, ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). 
text_config_dict = kwargs.pop("text_config_dict", None) image_config_dict = kwargs.pop("image_config_dict", None) multimodal_config_dict = kwargs.pop("multimodal_config_dict", None) image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None) super().__init__(**kwargs) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: text_config = {} # This is the complete result when using `text_config_dict`. _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: message = ( f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. " f'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The " f'value `text_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict) if image_config_dict is not None: if image_config is None: image_config = {} # This is the complete result when using `image_config_dict`. _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict() # convert keys to string instead of integer if "id2label" in _image_config_dict: _image_config_dict["id2label"] = { str(key): value for key, value in _image_config_dict["id2label"].items() } # Give a warning if the values exist in both `_image_config_dict` and `image_config` but being different. for key, value in _image_config_dict.items(): if key in image_config and value != image_config[key] and key not in ["transformers_version"]: # If specified in `image_config_dict` if key in image_config_dict: message = ( f"`{key}` is found in both `image_config_dict` and `image_config` but with different " f'values. The value `image_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. " f'The value `image_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `image_config` with the ones in `_image_config_dict`. image_config.update(_image_config_dict) if multimodal_config_dict is not None: if multimodal_config is None: multimodal_config = {} # This is the complete result when using `multimodal_config_dict`. _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict() # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but being # different. 
for key, value in _multimodal_config_dict.items(): if ( key in multimodal_config and value != multimodal_config[key] and key not in ["transformers_version"] ): # If specified in `multimodal_config_dict` if key in multimodal_config_dict: message = ( f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with " f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: message = ( f"`multimodal_config_dict` is provided which will be used to initialize " f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`. multimodal_config.update(_multimodal_config_dict) if image_codebook_config_dict is not None: if image_codebook_config is None: image_codebook_config = {} # This is the complete result when using `image_codebook_config_dict`. _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict() # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but # being different. for key, value in _image_codebook_config_dict.items(): if ( key in image_codebook_config and value != image_codebook_config[key] and key not in ["transformers_version"] ): # If specified in `image_codebook_config_dict` if key in image_codebook_config_dict: message = ( f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but " f'with different values. The value `image_codebook_config_dict["{key}"]` will be used ' "instead." ) # If inferred from default argument values (just to be super careful) else: message = ( f"`image_codebook_config_dict` is provided which will be used to initialize " f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.' ) logger.info(message) # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`. image_codebook_config.update(_image_codebook_config_dict) if image_config is None: image_config = {} logger.info("`image_config` is `None`. initializing the `FlavaImageConfig` with default values.") if text_config is None: text_config = {} logger.info("`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.") if multimodal_config is None: multimodal_config = {} logger.info("`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.") if image_codebook_config is None: image_codebook_config = {} logger.info( "`image_codebook_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values." 
) self.image_config = FlavaImageConfig(**image_config) self.text_config = FlavaTextConfig(**text_config) self.multimodal_config = FlavaMultimodalConfig(**multimodal_config) self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config) self.projection_dim = projection_dim self.init_codebook = init_codebook self.hidden_size = hidden_size self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.logit_scale_init_value = logit_scale_init_value self.initializer_factor = 1.0 self.ce_ignore_index = ce_ignore_index self.mim_weight = mim_weight self.mlm_weight = mlm_weight self.global_contrastive_weight = global_contrastive_weight self.itm_weight = itm_weight self.mmm_image_weight = mmm_image_weight self.mmm_text_weight = mmm_text_weight self.global_backprop_contrastive = global_backprop_contrastive self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder self.return_loss = return_loss @classmethod def from_configs( cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs, ): r""" Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model configuration, flava multimodal model and flava codebook model configuration. Returns: [`FlavaConfig`]: An instance of a configuration object """ return cls( image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs, ) __all__ = ["FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", "FlavaMultimodalConfig", "FlavaTextConfig"]
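A minimal sketch of composing the FLAVA configuration objects defined above via `FlavaConfig.from_configs`, assuming the `transformers` package that ships these classes is installed; the sizes are arbitrary example values, not recommended settings.

```python
from transformers import (
    FlavaConfig,
    FlavaImageCodebookConfig,
    FlavaImageConfig,
    FlavaMultimodalConfig,
    FlavaTextConfig,
)

# Small sub-configs with arbitrary example sizes.
image_config = FlavaImageConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
text_config = FlavaTextConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
multimodal_config = FlavaMultimodalConfig(hidden_size=384, num_hidden_layers=3, num_attention_heads=6)
image_codebook_config = FlavaImageCodebookConfig()

# from_configs serializes each sub-config and forwards it to FlavaConfig.__init__.
config = FlavaConfig.from_configs(
    image_config, text_config, multimodal_config, image_codebook_config
)
assert config.image_config.hidden_size == 384
assert config.text_config.num_hidden_layers == 6
```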
transformers/src/transformers/models/flava/configuration_flava.py/0
{ "file_path": "transformers/src/transformers/models/flava/configuration_flava.py", "repo_id": "transformers", "token_count": 13789 }
494
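The legacy `*_config_dict` handling in `FlavaConfig.__init__` above is easy to misread, so here is a short sketch of its observable behavior (derived from the code above, with arbitrary example values): the dict is expanded through the sub-config class — which fills in defaults — and the expanded values then replace any overlapping keys in the corresponding `*_config` argument.

```python
from transformers import FlavaConfig

# text_config_dict wins over text_config: it is expanded through FlavaTextConfig
# (filling in defaults such as hidden_size=768) and copied over `text_config`,
# with an informational message logged for each overridden key.
config = FlavaConfig(
    text_config={"vocab_size": 100, "hidden_size": 512},
    text_config_dict={"vocab_size": 200},
)
print(config.text_config.vocab_size)   # 200 -> taken from text_config_dict
print(config.text_config.hidden_size)  # 768 -> reset to the FlavaTextConfig default
```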
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert FNet checkpoint.""" import argparse import torch from flax.training.checkpoints import restore_checkpoint from transformers import FNetConfig, FNetForPreTraining from transformers.utils import logging logging.set_verbosity_info() def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, fnet_config_file, save_path): # Initialise PyTorch model config = FNetConfig.from_json_file(fnet_config_file) print(f"Building PyTorch model from configuration: {config}") fnet_pretraining_model = FNetForPreTraining(config) checkpoint_dict = restore_checkpoint(flax_checkpoint_path, None) pretrained_model_params = checkpoint_dict["target"] # Embeddings # Position IDs state_dict = fnet_pretraining_model.state_dict() position_ids = state_dict["fnet.embeddings.position_ids"] new_state_dict = {"fnet.embeddings.position_ids": position_ids} # Embedding Layers new_state_dict["fnet.embeddings.word_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["word"]["embedding"] ) new_state_dict["fnet.embeddings.position_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["position"]["embedding"][0] ) new_state_dict["fnet.embeddings.token_type_embeddings.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["type"]["embedding"] ) new_state_dict["fnet.embeddings.projection.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["kernel"] ).T new_state_dict["fnet.embeddings.projection.bias"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["hidden_mapping_in"]["bias"] ) new_state_dict["fnet.embeddings.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["layer_norm"]["scale"] ) new_state_dict["fnet.embeddings.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["layer_norm"]["bias"] ) # Encoder Layers for layer in range(config.num_hidden_layers): new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["scale"] ) new_state_dict[f"fnet.encoder.layer.{layer}.fourier.output.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["mixing_layer_norm"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.weight"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["kernel"] ).T new_state_dict[f"fnet.encoder.layer.{layer}.intermediate.dense.bias"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["intermediate"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.weight"] = torch.tensor( pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["kernel"] ).T new_state_dict[f"fnet.encoder.layer.{layer}.output.dense.bias"] = torch.tensor( 
pretrained_model_params["encoder"][f"feed_forward_{layer}"]["output"]["bias"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.weight"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["scale"] ) new_state_dict[f"fnet.encoder.layer.{layer}.output.LayerNorm.bias"] = torch.tensor( pretrained_model_params["encoder"][f"encoder_{layer}"]["output_layer_norm"]["bias"] ) # Pooler Layers new_state_dict["fnet.pooler.dense.weight"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["kernel"]).T new_state_dict["fnet.pooler.dense.bias"] = torch.tensor(pretrained_model_params["encoder"]["pooler"]["bias"]) # Masked LM Layers new_state_dict["cls.predictions.transform.dense.weight"] = torch.tensor( pretrained_model_params["predictions_dense"]["kernel"] ).T new_state_dict["cls.predictions.transform.dense.bias"] = torch.tensor( pretrained_model_params["predictions_dense"]["bias"] ) new_state_dict["cls.predictions.transform.LayerNorm.weight"] = torch.tensor( pretrained_model_params["predictions_layer_norm"]["scale"] ) new_state_dict["cls.predictions.transform.LayerNorm.bias"] = torch.tensor( pretrained_model_params["predictions_layer_norm"]["bias"] ) new_state_dict["cls.predictions.decoder.weight"] = torch.tensor( pretrained_model_params["encoder"]["embedder"]["word"]["embedding"] ) new_state_dict["cls.predictions.decoder.bias"] = torch.tensor( pretrained_model_params["predictions_output"]["output_bias"] ) new_state_dict["cls.predictions.bias"] = torch.tensor(pretrained_model_params["predictions_output"]["output_bias"]) # Seq Relationship Layers new_state_dict["cls.seq_relationship.weight"] = torch.tensor( pretrained_model_params["classification"]["output_kernel"] ) new_state_dict["cls.seq_relationship.bias"] = torch.tensor( pretrained_model_params["classification"]["output_bias"] ) # Load State Dict fnet_pretraining_model.load_state_dict(new_state_dict) # Save PreTrained print(f"Saving pretrained model to {save_path}") fnet_pretraining_model.save_pretrained(save_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--flax_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--fnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained FNet model. \n" "This specifies the model architecture." ), ) parser.add_argument("--save_path", default=None, type=str, required=True, help="Path to the output model.") args = parser.parse_args() convert_flax_checkpoint_to_pytorch(args.flax_checkpoint_path, args.fnet_config_file, args.save_path)
transformers/src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/fnet/convert_fnet_original_flax_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 2770 }
495
# coding=utf-8 # Copyright 2020-present Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Funnel Transformer model.""" import os from dataclasses import dataclass from typing import Optional, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, logging from .configuration_funnel import FunnelConfig logger = logging.get_logger(__name__) INF = 1e6 def load_tf_weights_in_funnel(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) _layer_map = { "k": "k_head", "q": "q_head", "v": "v_head", "o": "post_proj", "layer_1": "linear_1", "layer_2": "linear_2", "rel_attn": "attention", "ff": "ffn", "kernel": "weight", "gamma": "weight", "beta": "bias", "lookup_table": "weight", "word_embedding": "word_embeddings", "input": "embeddings", } for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue if name[0] == "generator": continue pointer = model skipped = False for m_name in name[1:]: if not isinstance(pointer, FunnelPositionwiseFFN) and re.fullmatch(r"layer_\d+", m_name): layer_index = int(re.search(r"layer_(\d+)", m_name).groups()[0]) if layer_index < config.num_hidden_layers: block_idx = 0 while layer_index >= config.block_sizes[block_idx]: layer_index -= config.block_sizes[block_idx] block_idx += 1 pointer = pointer.blocks[block_idx][layer_index] else: layer_index -= config.num_hidden_layers pointer = pointer.layers[layer_index] elif m_name == "r" and isinstance(pointer, FunnelRelMultiheadAttention): pointer = pointer.r_kernel break elif m_name in _layer_map: pointer = getattr(pointer, _layer_map[m_name]) else: try: pointer = getattr(pointer, m_name) except AttributeError: 
print(f"Skipping {'/'.join(name)}", array.shape) skipped = True break if not skipped: if len(pointer.shape) != len(array.shape): array = array.reshape(pointer.shape) if m_name == "kernel": array = np.transpose(array) pointer.data = torch.from_numpy(array) return model class FunnelEmbeddings(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) def forward( self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None ) -> torch.Tensor: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) embeddings = self.layer_norm(inputs_embeds) embeddings = self.dropout(embeddings) return embeddings class FunnelAttentionStructure(nn.Module): """ Contains helpers for `FunnelRelMultiheadAttention `. """ cls_token_type_id: int = 2 def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.sin_dropout = nn.Dropout(config.hidden_dropout) self.cos_dropout = nn.Dropout(config.hidden_dropout) # Track where we are at in terms of pooling from the original input, e.g., by how much the sequence length was # divided. self.pooling_mult = None def init_attention_inputs( self, inputs_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor]: """Returns the attention inputs associated to the inputs of the model.""" # inputs_embeds has shape batch_size x seq_len x d_model # attention_mask and token_type_ids have shape batch_size x seq_len self.pooling_mult = 1 self.seq_len = seq_len = inputs_embeds.size(1) position_embeds = self.get_position_embeds(seq_len, inputs_embeds.dtype, inputs_embeds.device) token_type_mat = self.token_type_ids_to_mat(token_type_ids) if token_type_ids is not None else None cls_mask = ( nn.functional.pad(inputs_embeds.new_ones([seq_len - 1, seq_len - 1]), (1, 0, 1, 0)) if self.config.separate_cls else None ) return (position_embeds, token_type_mat, attention_mask, cls_mask) def token_type_ids_to_mat(self, token_type_ids: torch.Tensor) -> torch.Tensor: """Convert `token_type_ids` to `token_type_mat`.""" token_type_mat = token_type_ids[:, :, None] == token_type_ids[:, None] # Treat <cls> as in the same segment as both A & B cls_ids = token_type_ids == self.cls_token_type_id cls_mat = cls_ids[:, :, None] | cls_ids[:, None] return cls_mat | token_type_mat def get_position_embeds( self, seq_len: int, dtype: torch.dtype, device: torch.device ) -> Union[tuple[torch.Tensor], list[list[torch.Tensor]]]: """ Create and cache inputs related to relative position encoding. Those are very different depending on whether we are using the factorized or the relative shift attention: For the factorized attention, it returns the matrices (phi, pi, psi, omega) used in the paper, appendix A.2.2, final formula. For the relative shift attention, it returns all possible vectors R used in the paper, appendix A.2.1, final formula. Paper link: https://huggingface.co/papers/2006.03236 """ d_model = self.config.d_model if self.config.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula. # We need to create and return the matrices phi, psi, pi and omega. 
pos_seq = torch.arange(0, seq_len, 1.0, dtype=torch.int64, device=device).to(dtype) freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype) inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2))) sinusoid = pos_seq[:, None] * inv_freq[None] sin_embed = torch.sin(sinusoid) sin_embed_d = self.sin_dropout(sin_embed) cos_embed = torch.cos(sinusoid) cos_embed_d = self.cos_dropout(cos_embed) # This is different from the formula on the paper... phi = torch.cat([sin_embed_d, sin_embed_d], dim=-1) psi = torch.cat([cos_embed, sin_embed], dim=-1) pi = torch.cat([cos_embed_d, cos_embed_d], dim=-1) omega = torch.cat([-sin_embed, cos_embed], dim=-1) return (phi, pi, psi, omega) else: # Notations from the paper, appending A.2.1, final formula. # We need to create and return all the possible vectors R for all blocks and shifts. freq_seq = torch.arange(0, d_model // 2, 1.0, dtype=torch.int64, device=device).to(dtype) inv_freq = 1 / (10000 ** (freq_seq / (d_model // 2))) # Maximum relative positions for the first input rel_pos_id = torch.arange(-seq_len * 2, seq_len * 2, 1.0, dtype=torch.int64, device=device).to(dtype) zero_offset = seq_len * 2 sinusoid = rel_pos_id[:, None] * inv_freq[None] sin_embed = self.sin_dropout(torch.sin(sinusoid)) cos_embed = self.cos_dropout(torch.cos(sinusoid)) pos_embed = torch.cat([sin_embed, cos_embed], dim=-1) pos = torch.arange(0, seq_len, dtype=torch.int64, device=device).to(dtype) pooled_pos = pos position_embeds_list = [] for block_index in range(0, self.config.num_blocks): # For each block with block_index > 0, we need two types position embeddings: # - Attention(pooled-q, unpooled-kv) # - Attention(pooled-q, pooled-kv) # For block_index = 0 we only need the second one and leave the first one as None. # First type if block_index == 0: position_embeds_pooling = None else: pooled_pos = self.stride_pool_pos(pos, block_index) # construct rel_pos_id stride = 2 ** (block_index - 1) rel_pos = self.relative_pos(pos, stride, pooled_pos, shift=2) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_pooling = torch.gather(pos_embed, 0, rel_pos) # Second type pos = pooled_pos stride = 2**block_index rel_pos = self.relative_pos(pos, stride) rel_pos = rel_pos[:, None] + zero_offset rel_pos = rel_pos.expand(rel_pos.size(0), d_model) position_embeds_no_pooling = torch.gather(pos_embed, 0, rel_pos) position_embeds_list.append([position_embeds_no_pooling, position_embeds_pooling]) return position_embeds_list def stride_pool_pos(self, pos_id: torch.Tensor, block_index: int): """ Pool `pos_id` while keeping the cls token separate (if `config.separate_cls=True`). """ if self.config.separate_cls: # Under separate <cls>, we treat the <cls> as the first token in # the previous block of the 1st real block. Since the 1st real # block always has position 1, the position of the previous block # will be at `1 - 2 ** block_index`. cls_pos = pos_id.new_tensor([-(2**block_index) + 1]) pooled_pos_id = pos_id[1:-1] if self.config.truncate_seq else pos_id[1:] return torch.cat([cls_pos, pooled_pos_id[::2]], 0) else: return pos_id[::2] def relative_pos(self, pos: torch.Tensor, stride: int, pooled_pos=None, shift: int = 1) -> torch.Tensor: """ Build the relative positional vector between `pos` and `pooled_pos`. 
""" if pooled_pos is None: pooled_pos = pos ref_point = pooled_pos[0] - pos[0] num_remove = shift * len(pooled_pos) max_dist = ref_point + num_remove * stride min_dist = pooled_pos[0] - pos[-1] return torch.arange(max_dist, min_dist - 1, -stride, dtype=torch.long, device=pos.device) def stride_pool( self, tensor: Union[torch.Tensor, tuple[torch.Tensor], list[torch.Tensor]], axis: Union[int, tuple[int], list[int]], ) -> torch.Tensor: """ Perform pooling by stride slicing the tensor along the given axis. """ if tensor is None: return None # Do the stride pool recursively if axis is a list or a tuple of ints. if isinstance(axis, (list, tuple)): for ax in axis: tensor = self.stride_pool(tensor, ax) return tensor # Do the stride pool recursively if tensor is a list or tuple of tensors. if isinstance(tensor, (tuple, list)): return type(tensor)(self.stride_pool(x, axis) for x in tensor) # Deal with negative axis axis %= tensor.ndim axis_slice = ( slice(None, -1, 2) if self.config.separate_cls and self.config.truncate_seq else slice(None, None, 2) ) enc_slice = [slice(None)] * axis + [axis_slice] if self.config.separate_cls: cls_slice = [slice(None)] * axis + [slice(None, 1)] tensor = torch.cat([tensor[cls_slice], tensor], axis=axis) return tensor[enc_slice] def pool_tensor( self, tensor: Union[torch.Tensor, tuple[torch.Tensor], list[torch.Tensor]], mode: str = "mean", stride: int = 2 ) -> torch.Tensor: """Apply 1D pooling to a tensor of size [B x T (x H)].""" if tensor is None: return None # Do the pool recursively if tensor is a list or tuple of tensors. if isinstance(tensor, (tuple, list)): return type(tensor)(self.pool_tensor(tensor, mode=mode, stride=stride) for x in tensor) if self.config.separate_cls: suffix = tensor[:, :-1] if self.config.truncate_seq else tensor tensor = torch.cat([tensor[:, :1], suffix], dim=1) ndim = tensor.ndim if ndim == 2: tensor = tensor[:, None, :, None] elif ndim == 3: tensor = tensor[:, None, :, :] # Stride is applied on the second-to-last dimension. 
stride = (stride, 1) if mode == "mean": tensor = nn.functional.avg_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == "max": tensor = nn.functional.max_pool2d(tensor, stride, stride=stride, ceil_mode=True) elif mode == "min": tensor = -nn.functional.max_pool2d(-tensor, stride, stride=stride, ceil_mode=True) else: raise NotImplementedError("The supported modes are 'mean', 'max' and 'min'.") if ndim == 2: return tensor[:, 0, :, 0] elif ndim == 3: return tensor[:, 0] return tensor def pre_attention_pooling( self, output, attention_inputs: tuple[torch.Tensor] ) -> tuple[torch.Tensor, tuple[torch.Tensor]]: """Pool `output` and the proper parts of `attention_inputs` before the attention layer.""" position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs if self.config.pool_q_only: if self.config.attention_type == "factorized": position_embeds = self.stride_pool(position_embeds[:2], 0) + position_embeds[2:] token_type_mat = self.stride_pool(token_type_mat, 1) cls_mask = self.stride_pool(cls_mask, 0) output = self.pool_tensor(output, mode=self.config.pooling_type) else: self.pooling_mult *= 2 if self.config.attention_type == "factorized": position_embeds = self.stride_pool(position_embeds, 0) token_type_mat = self.stride_pool(token_type_mat, [1, 2]) cls_mask = self.stride_pool(cls_mask, [1, 2]) attention_mask = self.pool_tensor(attention_mask, mode="min") output = self.pool_tensor(output, mode=self.config.pooling_type) attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return output, attention_inputs def post_attention_pooling(self, attention_inputs: tuple[torch.Tensor]) -> tuple[torch.Tensor]: """Pool the proper parts of `attention_inputs` after the attention layer.""" position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs if self.config.pool_q_only: self.pooling_mult *= 2 if self.config.attention_type == "factorized": position_embeds = position_embeds[:2] + self.stride_pool(position_embeds[2:], 0) token_type_mat = self.stride_pool(token_type_mat, 2) cls_mask = self.stride_pool(cls_mask, 1) attention_mask = self.pool_tensor(attention_mask, mode="min") attention_inputs = (position_embeds, token_type_mat, attention_mask, cls_mask) return attention_inputs def _relative_shift_gather(positional_attn: torch.Tensor, context_len: int, shift: int) -> torch.Tensor: batch_size, n_head, seq_len, max_rel_len = positional_attn.shape # max_rel_len = 2 * context_len + shift -1 is the numbers of possible relative positions i-j # What's next is the same as doing the following gather, which might be clearer code but less efficient. 
# idxs = context_len + torch.arange(0, context_len).unsqueeze(0) - torch.arange(0, seq_len).unsqueeze(1) # # matrix of context_len + i-j # return positional_attn.gather(3, idxs.expand([batch_size, n_head, context_len, context_len])) positional_attn = torch.reshape(positional_attn, [batch_size, n_head, max_rel_len, seq_len]) positional_attn = positional_attn[:, :, shift:, :] positional_attn = torch.reshape(positional_attn, [batch_size, n_head, seq_len, max_rel_len - shift]) positional_attn = positional_attn[..., :context_len] return positional_attn class FunnelRelMultiheadAttention(nn.Module): def __init__(self, config: FunnelConfig, block_index: int) -> None: super().__init__() self.config = config self.block_index = block_index d_model, n_head, d_head = config.d_model, config.n_head, config.d_head self.hidden_dropout = nn.Dropout(config.hidden_dropout) self.attention_dropout = nn.Dropout(config.attention_dropout) self.q_head = nn.Linear(d_model, n_head * d_head, bias=False) self.k_head = nn.Linear(d_model, n_head * d_head) self.v_head = nn.Linear(d_model, n_head * d_head) self.r_w_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_r_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.r_kernel = nn.Parameter(torch.zeros([d_model, n_head, d_head])) self.r_s_bias = nn.Parameter(torch.zeros([n_head, d_head])) self.seg_embed = nn.Parameter(torch.zeros([2, n_head, d_head])) self.post_proj = nn.Linear(n_head * d_head, d_model) self.layer_norm = nn.LayerNorm(d_model, eps=config.layer_norm_eps) self.scale = 1.0 / (d_head**0.5) def relative_positional_attention(self, position_embeds, q_head, context_len, cls_mask=None): """Relative attention score for the positional encodings""" # q_head has shape batch_size x sea_len x n_head x d_head if self.config.attention_type == "factorized": # Notations from the paper, appending A.2.2, final formula (https://huggingface.co/papers/2006.03236) # phi and pi have shape seq_len x d_model, psi and omega have shape context_len x d_model phi, pi, psi, omega = position_embeds # Shape n_head x d_head u = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape batch_size x sea_len x n_head x d_model q_r_attention = torch.einsum("binh,dnh->bind", q_head + u, w_r) q_r_attention_1 = q_r_attention * phi[:, None] q_r_attention_2 = q_r_attention * pi[:, None] # Shape batch_size x n_head x seq_len x context_len positional_attn = torch.einsum("bind,jd->bnij", q_r_attention_1, psi) + torch.einsum( "bind,jd->bnij", q_r_attention_2, omega ) else: shift = 2 if q_head.shape[1] != context_len else 1 # Notations from the paper, appending A.2.1, final formula (https://huggingface.co/papers/2006.03236) # Grab the proper positional encoding, shape max_rel_len x d_model r = position_embeds[self.block_index][shift - 1] # Shape n_head x d_head v = self.r_r_bias * self.scale # Shape d_model x n_head x d_head w_r = self.r_kernel # Shape max_rel_len x n_head x d_model r_head = torch.einsum("td,dnh->tnh", r, w_r) # Shape batch_size x n_head x seq_len x max_rel_len positional_attn = torch.einsum("binh,tnh->bnit", q_head + v, r_head) # Shape batch_size x n_head x seq_len x context_len positional_attn = _relative_shift_gather(positional_attn, context_len, shift) if cls_mask is not None: positional_attn *= cls_mask return positional_attn def relative_token_type_attention(self, token_type_mat, q_head, cls_mask=None): """Relative attention score for the token_type_ids""" if token_type_mat is None: return 0 batch_size, seq_len, context_len = 
token_type_mat.shape # q_head has shape batch_size x seq_len x n_head x d_head # Shape n_head x d_head r_s_bias = self.r_s_bias * self.scale # Shape batch_size x n_head x seq_len x 2 token_type_bias = torch.einsum("bind,snd->bnis", q_head + r_s_bias, self.seg_embed) # Shape batch_size x n_head x seq_len x context_len token_type_mat = token_type_mat[:, None].expand([batch_size, q_head.shape[2], seq_len, context_len]) # Shapes batch_size x n_head x seq_len diff_token_type, same_token_type = torch.split(token_type_bias, 1, dim=-1) # Shape batch_size x n_head x seq_len x context_len token_type_attn = torch.where( token_type_mat, same_token_type.expand(token_type_mat.shape), diff_token_type.expand(token_type_mat.shape) ) if cls_mask is not None: token_type_attn *= cls_mask return token_type_attn def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_inputs: tuple[torch.Tensor], output_attentions: bool = False, ) -> tuple[torch.Tensor, ...]: # query has shape batch_size x seq_len x d_model # key and value have shapes batch_size x context_len x d_model position_embeds, token_type_mat, attention_mask, cls_mask = attention_inputs batch_size, seq_len, _ = query.shape context_len = key.shape[1] n_head, d_head = self.config.n_head, self.config.d_head # Shape batch_size x seq_len x n_head x d_head q_head = self.q_head(query).view(batch_size, seq_len, n_head, d_head) # Shapes batch_size x context_len x n_head x d_head k_head = self.k_head(key).view(batch_size, context_len, n_head, d_head) v_head = self.v_head(value).view(batch_size, context_len, n_head, d_head) q_head = q_head * self.scale # Shape n_head x d_head r_w_bias = self.r_w_bias * self.scale # Shapes batch_size x n_head x seq_len x context_len content_score = torch.einsum("bind,bjnd->bnij", q_head + r_w_bias, k_head) positional_attn = self.relative_positional_attention(position_embeds, q_head, context_len, cls_mask) token_type_attn = self.relative_token_type_attention(token_type_mat, q_head, cls_mask) # merge attention scores attn_score = content_score + positional_attn + token_type_attn # precision safe in case of mixed precision training dtype = attn_score.dtype attn_score = attn_score.float() # perform masking if attention_mask is not None: attn_score = attn_score - INF * (1 - attention_mask[:, None, None].float()) # attention probability attn_prob = torch.softmax(attn_score, dim=-1, dtype=dtype) attn_prob = self.attention_dropout(attn_prob) # attention output, shape batch_size x seq_len x n_head x d_head attn_vec = torch.einsum("bnij,bjnd->bind", attn_prob, v_head) # Shape shape batch_size x seq_len x d_model attn_out = self.post_proj(attn_vec.reshape(batch_size, seq_len, n_head * d_head)) attn_out = self.hidden_dropout(attn_out) output = self.layer_norm(query + attn_out) return (output, attn_prob) if output_attentions else (output,) class FunnelPositionwiseFFN(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.linear_1 = nn.Linear(config.d_model, config.d_inner) self.activation_function = ACT2FN[config.hidden_act] self.activation_dropout = nn.Dropout(config.activation_dropout) self.linear_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.d_model, config.layer_norm_eps) def forward(self, hidden: torch.Tensor) -> torch.Tensor: h = self.linear_1(hidden) h = self.activation_function(h) h = self.activation_dropout(h) h = self.linear_2(h) h = self.dropout(h) return self.layer_norm(hidden + h) 
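# Minimal standalone sketch of what pool_tensor(..., mode="mean", stride=2) above does
# to a 3D batch of hidden states, leaving out the separate_cls / truncate_seq
# bookkeeping. Tensor values and shapes are illustrative only.
import torch
import torch.nn.functional as F

hidden = torch.arange(24, dtype=torch.float32).reshape(1, 6, 4)   # [B, T, H]
x = hidden[:, None, :, :]                                         # [B, 1, T, H]
pooled = F.avg_pool2d(x, kernel_size=(2, 1), stride=(2, 1), ceil_mode=True)
pooled = pooled[:, 0]                                             # [B, ceil(T/2), H]
print(hidden.shape, "->", pooled.shape)  # torch.Size([1, 6, 4]) -> torch.Size([1, 3, 4])
# Each such halving is what lets a deeper Funnel block attend over a sequence half as
# long as the one seen by the previous block.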
class FunnelLayer(nn.Module): def __init__(self, config: FunnelConfig, block_index: int) -> None: super().__init__() self.attention = FunnelRelMultiheadAttention(config, block_index) self.ffn = FunnelPositionwiseFFN(config) def forward( self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_inputs, output_attentions: bool = False, ) -> tuple: attn = self.attention(query, key, value, attention_inputs, output_attentions=output_attentions) output = self.ffn(attn[0]) return (output, attn[1]) if output_attentions else (output,) class FunnelEncoder(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.blocks = nn.ModuleList( [ nn.ModuleList([FunnelLayer(config, block_index) for _ in range(block_size)]) for block_index, block_size in enumerate(config.block_sizes) ] ) def forward( self, inputs_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: # The pooling is not implemented on long tensors, so we convert this mask. attention_mask = attention_mask.type_as(inputs_embeds) attention_inputs = self.attention_structure.init_attention_inputs( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, ) hidden = inputs_embeds all_hidden_states = (inputs_embeds,) if output_hidden_states else None all_attentions = () if output_attentions else None for block_index, block in enumerate(self.blocks): pooling_flag = hidden.size(1) > (2 if self.config.separate_cls else 1) pooling_flag = pooling_flag and block_index > 0 if pooling_flag: pooled_hidden, attention_inputs = self.attention_structure.pre_attention_pooling( hidden, attention_inputs ) for layer_index, layer in enumerate(block): for repeat_index in range(self.config.block_repeats[block_index]): do_pooling = (repeat_index == 0) and (layer_index == 0) and pooling_flag if do_pooling: query = pooled_hidden key = value = hidden if self.config.pool_q_only else pooled_hidden else: query = key = value = hidden layer_output = layer(query, key, value, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if do_pooling: attention_inputs = self.attention_structure.post_attention_pooling(attention_inputs) if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) def upsample( x: torch.Tensor, stride: int, target_len: int, separate_cls: bool = True, truncate_seq: bool = False ) -> torch.Tensor: """ Upsample tensor `x` to match `target_len` by repeating the tokens `stride` time on the sequence length dimension. 
""" if stride == 1: return x if separate_cls: cls = x[:, :1] x = x[:, 1:] output = torch.repeat_interleave(x, repeats=stride, dim=1) if separate_cls: if truncate_seq: output = nn.functional.pad(output, (0, 0, 0, stride - 1, 0, 0)) output = output[:, : target_len - 1] output = torch.cat([cls, output], dim=1) else: output = output[:, :target_len] return output class FunnelDecoder(nn.Module): def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.attention_structure = FunnelAttentionStructure(config) self.layers = nn.ModuleList([FunnelLayer(config, 0) for _ in range(config.num_decoder_layers)]) def forward( self, final_hidden: torch.Tensor, first_block_hidden: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ) -> Union[tuple, BaseModelOutput]: upsampled_hidden = upsample( final_hidden, stride=2 ** (len(self.config.block_sizes) - 1), target_len=first_block_hidden.shape[1], separate_cls=self.config.separate_cls, truncate_seq=self.config.truncate_seq, ) hidden = upsampled_hidden + first_block_hidden all_hidden_states = (hidden,) if output_hidden_states else None all_attentions = () if output_attentions else None attention_inputs = self.attention_structure.init_attention_inputs( hidden, attention_mask=attention_mask, token_type_ids=token_type_ids, ) for layer in self.layers: layer_output = layer(hidden, hidden, hidden, attention_inputs, output_attentions=output_attentions) hidden = layer_output[0] if output_attentions: all_attentions = all_attentions + layer_output[1:] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden,) if not return_dict: return tuple(v for v in [hidden, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput(last_hidden_state=hidden, hidden_states=all_hidden_states, attentions=all_attentions) class FunnelDiscriminatorPredictions(nn.Module): """Prediction module for the discriminator, made up of two dense layers.""" def __init__(self, config: FunnelConfig) -> None: super().__init__() self.config = config self.dense = nn.Linear(config.d_model, config.d_model) self.dense_prediction = nn.Linear(config.d_model, 1) def forward(self, discriminator_hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(discriminator_hidden_states) hidden_states = ACT2FN[self.config.hidden_act](hidden_states) logits = self.dense_prediction(hidden_states).squeeze(-1) return logits @auto_docstring class FunnelPreTrainedModel(PreTrainedModel): config: FunnelConfig load_tf_weights = load_tf_weights_in_funnel base_model_prefix = "funnel" def _init_weights(self, module): classname = module.__class__.__name__ if classname.find("Linear") != -1: if getattr(module, "weight", None) is not None: if self.config.initializer_std is None: fan_out, fan_in = module.weight.shape std = np.sqrt(1.0 / float(fan_in + fan_out)) else: std = self.config.initializer_std nn.init.normal_(module.weight, std=std) if getattr(module, "bias", None) is not None: nn.init.constant_(module.bias, 0.0) elif classname == "FunnelRelMultiheadAttention": nn.init.uniform_(module.r_w_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_r_bias, b=self.config.initializer_range) nn.init.uniform_(module.r_kernel, b=self.config.initializer_range) nn.init.uniform_(module.r_s_bias, b=self.config.initializer_range) nn.init.uniform_(module.seg_embed, b=self.config.initializer_range) elif 
classname == "FunnelEmbeddings": std = 1.0 if self.config.initializer_std is None else self.config.initializer_std nn.init.normal_(module.word_embeddings.weight, std=std) if module.word_embeddings.padding_idx is not None: module.word_embeddings.weight.data[module.word_embeddings.padding_idx].zero_() class FunnelClassificationHead(nn.Module): def __init__(self, config: FunnelConfig, n_labels: int) -> None: super().__init__() self.linear_hidden = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(config.hidden_dropout) self.linear_out = nn.Linear(config.d_model, n_labels) def forward(self, hidden: torch.Tensor) -> torch.Tensor: hidden = self.linear_hidden(hidden) hidden = torch.tanh(hidden) hidden = self.dropout(hidden) return self.linear_out(hidden) @dataclass @auto_docstring( custom_intro=""" Output type of [`FunnelForPreTraining`]. """ ) class FunnelForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss of the ELECTRA-style objective. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring( custom_intro=""" The base Funnel Transformer Model transformer outputting raw hidden-states without upsampling head (also called decoder) or any task-specific head on top. """ ) class FunnelBaseModel(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None: self.embeddings.word_embeddings = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # TODO: deal with head_mask 
inputs_embeds = self.embeddings(input_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs @auto_docstring class FunnelModel(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.config = config self.embeddings = FunnelEmbeddings(config) self.encoder = FunnelEncoder(config) self.decoder = FunnelDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Embedding: return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None: self.embeddings.word_embeddings = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # TODO: deal with head_mask inputs_embeds = self.embeddings(input_ids, inputs_embeds=inputs_embeds) encoder_outputs = self.encoder( inputs_embeds, attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) decoder_outputs = self.decoder( final_hidden=encoder_outputs[0], first_block_hidden=encoder_outputs[1][self.config.block_sizes[0]], attention_mask=attention_mask, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: idx = 0 outputs = (decoder_outputs[0],) if output_hidden_states: idx += 1 outputs = outputs + (encoder_outputs[1] + decoder_outputs[idx],) if output_attentions: idx += 1 outputs = outputs + (encoder_outputs[2] + decoder_outputs[idx],) return outputs return BaseModelOutput( last_hidden_state=decoder_outputs[0], hidden_states=(encoder_outputs.hidden_states + decoder_outputs.hidden_states) if output_hidden_states else None, attentions=(encoder_outputs.attentions + decoder_outputs.attentions) if output_attentions else None, ) @auto_docstring( custom_intro=""" Funnel Transformer model with a binary classification head on top as used during pretraining for identifying generated tokens. 
""" ) class FunnelForPreTraining(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelModel(config) self.discriminator_predictions = FunnelDiscriminatorPredictions(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, FunnelForPreTrainingOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the ELECTRA-style loss. Input should be a sequence of tokens (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates the token is an original token, - 1 indicates the token was replaced. Examples: ```python >>> from transformers import AutoTokenizer, FunnelForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("funnel-transformer/small") >>> model = FunnelForPreTraining.from_pretrained("funnel-transformer/small") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> logits = model(**inputs).logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.BCEWithLogitsLoss() if attention_mask is not None: active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1 active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss] active_labels = labels[active_loss] loss = loss_fct(active_logits, active_labels.float()) else: loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return FunnelForPreTrainingOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @auto_docstring class FunnelForMaskedLM(FunnelPreTrainedModel): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self) -> nn.Linear: return self.lm_head def set_output_embeddings(self, new_embeddings: nn.Embedding) -> None: self.lm_head = new_embeddings @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedLMOutput]: 
r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] prediction_logits = self.lm_head(last_hidden_state) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" Funnel Transformer Model with a sequence classification/regression head on top (two linear layer on top of the first timestep of the last hidden state) e.g. for GLUE tasks. """ ) class FunnelForSequenceClassification(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.config = config self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class FunnelForMultipleChoice(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.funnel = FunnelBaseModel(config) self.classifier = FunnelClassificationHead(config, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] pooled_output = last_hidden_state[:, 0] logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class FunnelForTokenClassification(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.dropout(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class FunnelForQuestionAnswering(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] logits = self.qa_outputs(last_hidden_state) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ]
transformers/src/transformers/models/funnel/modeling_funnel.py/0
{ "file_path": "transformers/src/transformers/models/funnel/modeling_funnel.py", "repo_id": "transformers", "token_count": 27621 }
496
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/gemma/modular_gemma.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_gemma.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from shutil import copyfile from typing import TYPE_CHECKING, Any, Optional import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging from ...utils.import_utils import requires if TYPE_CHECKING: from ...tokenization_utils_base import TextInput logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} SPIECE_UNDERLINE = "▁" @requires(backends=("sentencepiece",)) class GemmaTokenizer(PreTrainedTokenizer): """ Construct a Gemma tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is no padding token in the original model. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. sp_model_kwargs (`dict[str, Any]`, `Optional`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. add_bos_token (`bool`, *optional*, defaults to `True`): Whether or not to add an `bos_token` at the start of sequences. add_eos_token (`bool`, *optional*, defaults to `False`): Whether or not to add an `eos_token` at the end of sequences. 
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra spaces. use_default_system_prompt (`bool`, *optional*, defaults to `False`): Whether or not the default system prompt for Gemma should be used. spaces_between_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not to add spaces between special tokens. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<bos>", eos_token="<eos>", pad_token="<pad>", sp_model_kwargs: Optional[dict[str, Any]] = None, add_bos_token=True, add_eos_token=False, clean_up_tokenization_spaces=False, use_default_system_prompt=False, spaces_between_special_tokens=False, **kwargs, ): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token self.vocab_file = vocab_file self.add_bos_token = add_bos_token self.add_eos_token = add_eos_token self.use_default_system_prompt = use_default_system_prompt self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, sp_model_kwargs=sp_model_kwargs, clean_up_tokenization_spaces=clean_up_tokenization_spaces, use_default_system_prompt=use_default_system_prompt, spaces_between_special_tokens=spaces_between_special_tokens, **kwargs, ) def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__.update(d) self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def vocab_size(self): """Returns vocab size""" return self.sp_model.get_piece_size() def get_vocab(self): """Returns vocab as a dict""" vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def tokenize(self, text: "TextInput", **kwargs) -> list[str]: """ Args: text: TextInput Simply calls PreTrainedTokenizer's method """ return super().tokenize(text, **kwargs) def _tokenize(self, text, **kwargs): """ Args: text: TextInput Returns a tokenized string. The Gemma tokenizer never adds a prefix space. 
""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self._added_tokens_encoder: out_string += self.sp_model.decode(current_sub_tokens) + token current_sub_tokens = [] else: current_sub_tokens.append(token) out_string += self.sp_model.decode(current_sub_tokens) return out_string def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> tuple[str]: """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. Returns: `Tuple(str)`: Paths to the files saved. """ if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = bos_token_id + token_ids_0 + eos_token_id if token_ids_1 is not None: output = output + bos_token_id + token_ids_1 + eos_token_id return output def get_special_tokens_mask( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False ) -> list[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`list[int]`): List of IDs. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id return ( bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + bos_token_id + ([0] * len(token_ids_1)) + eos_token_id ) def create_token_type_ids_from_sequences( self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None ) -> list[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. 
An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`list[int]`): List of ids. token_ids_1 (`list[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `list[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ bos_token_id = [self.bos_token_id] if self.add_bos_token else [] eos_token_id = [self.eos_token_id] if self.add_eos_token else [] output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) if token_ids_1 is not None: output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) return output def _decode( self, token_ids: list[int], skip_special_tokens: bool = False, spaces_between_special_tokens: bool = False, **kwargs, ) -> str: sub_texts = [] current_sub_text = [] for ids in token_ids: if skip_special_tokens and ids in self.all_special_ids: continue if ids in self._added_tokens_decoder: if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) sub_texts.append(self._added_tokens_decoder[ids].content) current_sub_text = [] else: current_sub_text.append(ids) if current_sub_text: sub_texts.append(self.sp_model.decode(current_sub_text)) if spaces_between_special_tokens: sub_texts = " ".join(sub_texts) else: sub_texts = "".join(sub_texts) return sub_texts.replace(SPIECE_UNDERLINE, " ") __all__ = ["GemmaTokenizer"]
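A quick usage sketch for the tokenizer file above. This is illustrative only: `"path/to/tokenizer.model"` is a placeholder for a real SentencePiece model file (not a path shipped with the library), and running it requires the `sentencepiece` package.

```python
from transformers import GemmaTokenizer

# "path/to/tokenizer.model" is a placeholder; point it at a real SentencePiece model file.
tokenizer = GemmaTokenizer("path/to/tokenizer.model", add_bos_token=True, add_eos_token=False)

encoding = tokenizer("Hello world")
# With add_bos_token=True and add_eos_token=False, build_inputs_with_special_tokens
# prepends the <bos> id and appends nothing.
assert encoding["input_ids"][0] == tokenizer.bos_token_id

# get_special_tokens_mask flags the added <bos> with 1 and ordinary tokens with 0.
mask = tokenizer.get_special_tokens_mask(encoding["input_ids"], already_has_special_tokens=True)
assert mask[0] == 1 and all(m == 0 for m in mask[1:])
```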
transformers/src/transformers/models/gemma/tokenization_gemma.py/0
{ "file_path": "transformers/src/transformers/models/gemma/tokenization_gemma.py", "repo_id": "transformers", "token_count": 6346 }
497
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/gemma3n/modular_gemma3n.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_gemma3n.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
from typing import Any, Optional, Union

from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...modeling_rope_utils import rope_config_validation
from ...utils import is_timm_available, logging, requires_backends


if is_timm_available():
    from timm.data import ImageNetInfo, infer_imagenet_subset


logger = logging.get_logger(__name__)


class Gemma3nTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Gemma3nTextModel`]. It is used to instantiate
    a Gemma3nTextModel model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g.
    [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B).

    Configuration objects inherit from [`Gemma3nTextConfig`] and can be used to control the model outputs. Read the
    documentation from [`Gemma3nTextConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 262400):
            Vocabulary size of the Gemma3nText model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`Gemma3nTextModel`].
        vocab_size_per_layer_input (`int`, *optional*, defaults to 262144):
            Vocabulary size of the per-layer text embeddings that augment the standard embeddings.
        hidden_size (`int`, *optional*, defaults to 2048):
            Dimension of the hidden representations.
        hidden_size_per_layer_input (`int`, *optional*, defaults to 256):
            Dimension of the hidden representations for per-layer embeddings.
        intermediate_size (`int` or `Sequence[int]`, *optional*, defaults to 16384):
            Dimension of the MLP representations. MatFormer configurations may wish to provide a sequence of integers
            to account for variable intermediate_size values across layers. In such cases,
            `len(intermediate_size) == num_hidden_layers`.
        num_hidden_layers (`int`, *optional*, defaults to 35):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 2):
            This is the number of key_value heads that should be used to implement Grouped Query Attention.
            If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details check out this
            [paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified, will default to `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to
            `"gelu_pytorch_tanh"` if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"`
            activation function.
        max_position_embeddings (`int`, *optional*, defaults to 32768):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings used in global attention. NOTE:
            if you apply a new rope type and you expect the model to work on longer `max_position_embeddings`, we
            recommend you to update this value accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to the value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        rope_local_base_freq (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings for local attention.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        sliding_window (`int`, *optional*, defaults to 512):
            This is the size of the sliding window used by local attention layers.
        layer_types (`Optional`, *optional*):
            A sequence of strings defining the attention type for that layer as either "sliding_attention" or
            "full_attention". If not provided, `layer_types` will be inferred from `num_hidden_layers` using a pattern
            of four "sliding_attention" layers followed by one "full_attention". The last layer in the model should
            always be a "full_attention" layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        altup_active_idx (`int`, *optional*, defaults to 0):
            The index of the prediction from which AltUp will compute additional predictions or correct the output.
        altup_coef_clip (`float`, *optional*, defaults to 120.0):
            The maximum amplitude of an AltUp prediction or correction coefficient weight.
        altup_correct_scale (`bool`, *optional*, defaults to `True`):
            If True, apply the `AltUp.correct_output_scale` to the corrected prediction at `altup_active_idx`.
        altup_num_inputs (`int`, *optional*, defaults to 4):
            The number of predictions that AltUp should make given the input sequence.
        num_kv_shared_layers (`int`, *optional*, defaults to 15):
            The number of layers that share KV cache values. During the forward pass, the last `num_kv_shared_layers`
            layers in the model "share" the KV values in that each local and global layer in this range uses the KV
            cache values computed for the last local or global layer, respectively, before entering this range. The
            value of `num_kv_shared_layers` should be a multiple of `sliding_window_pattern`.
        laurel_rank (`int`, *optional*, defaults to 64):
            The intermediate size for the linear projections in the Learned Augmented Residual Layer.
        activation_sparsity_pattern (`Sequence[float]`, *optional*, defaults to `(0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)`):
            The sparsity factor used to extract the top-k activations for a given layer. The provided Sequence must
            explicitly provide a sparsity value for each layer in the model.
```python >>> from transformers import Gemma3nTextModel, Gemma3nTextConfig >>> # Initializing a Gemma3nText gemma3n_text-E4B style configuration >>> configuration = Gemma3nTextConfig() >>> # Initializing a model from the gemma3n_text-E4B style configuration >>> model = Gemma3nTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "gemma3n_text" keys_to_ignore_at_inference = ["past_key_values"] base_model_tp_plan = { "layers.*.self_attn.q_proj": "colwise", "layers.*.self_attn.k_proj": "colwise", "layers.*.self_attn.v_proj": "colwise", "layers.*.self_attn.o_proj": "rowwise", "layers.*.mlp.gate_proj": "colwise", "layers.*.mlp.up_proj": "colwise", "layers.*.mlp.down_proj": "rowwise", } base_model_pp_plan = { "embed_tokens": (["input_ids"], ["inputs_embeds"]), "layers": (["hidden_states", "attention_mask"], ["hidden_states"]), "norm": (["hidden_states"], ["hidden_states"]), } def __init__( self, vocab_size: int = 262_400, vocab_size_per_layer_input: int = 262_144, hidden_size: int = 2048, hidden_size_per_layer_input: int = 256, intermediate_size: Union[int, Sequence[int]] = 16_384, num_hidden_layers: int = 35, num_attention_heads: int = 8, num_key_value_heads: int = 2, head_dim: int = 256, hidden_activation: str = "gelu_pytorch_tanh", max_position_embeddings: int = 32_768, initializer_range: float = 0.02, rms_norm_eps: float = 1e-6, use_cache: bool = True, pad_token_id: int = 0, eos_token_id: int = 1, bos_token_id: int = 2, rope_theta: float = 1_000_000.0, rope_scaling: Optional[dict[str, Any]] = None, rope_local_base_freq: float = 10_000.0, attention_bias: bool = False, attention_dropout: float = 0.0, sliding_window: int = 512, layer_types: Optional[Sequence[str]] = None, final_logit_softcapping: float = 30.0, altup_active_idx: int = 0, altup_coef_clip: float = 120.0, altup_correct_scale: bool = True, altup_num_inputs: int = 4, num_kv_shared_layers: int = 15, laurel_rank: int = 64, activation_sparsity_pattern: Optional[Union[float, Sequence[float]]] = (0.95,) * 10 + (0.0,) * 25, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) if isinstance(intermediate_size, Sequence) and (intsize_len := len(intermediate_size)) != num_hidden_layers: raise ValueError( "intermediate_size must have an explicit intermediate size for every layer or one for all layers. " f"Expected {num_hidden_layers} values but got {intsize_len}." 
) elif not isinstance(intermediate_size, Sequence): intermediate_size = [intermediate_size] * num_hidden_layers self.vocab_size = vocab_size self.vocab_size_per_layer_input = vocab_size_per_layer_input self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.head_dim = head_dim self.num_key_value_heads = num_key_value_heads self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_bias = attention_bias self.attention_dropout = attention_dropout self.hidden_activation = hidden_activation self.sliding_window = sliding_window self.final_logit_softcapping = final_logit_softcapping self.layer_types = layer_types self.rope_local_base_freq = rope_local_base_freq self.rope_scaling = rope_scaling rope_config_validation(self) if layer_types is None: self.layer_types = [ "full_attention" if (i + 1) % 5 == 0 else "sliding_attention" for i in range(self.num_hidden_layers) ] else: self.layer_types = layer_types layer_type_validation(self.layer_types) self.hidden_size_per_layer_input = hidden_size_per_layer_input self.num_kv_shared_layers = num_kv_shared_layers self.altup_active_idx = altup_active_idx self.altup_coef_clip = altup_coef_clip self.altup_correct_scale = altup_correct_scale self.altup_num_inputs = altup_num_inputs self.laurel_rank = laurel_rank if activation_sparsity_pattern is None: activation_sparsity_pattern = [0.0] * num_hidden_layers if (len_asp := len(activation_sparsity_pattern)) != num_hidden_layers: raise ValueError( "activation_sparsity_pattern must have an explicit activation sparsity value for every layer." f"Expected {num_hidden_layers} values but got {len_asp}." ) self.activation_sparsity_pattern = activation_sparsity_pattern class Gemma3nAudioConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Gemma3nAudioEncoder`]. It is used to instantiate an `Gemma3nAudioEncoder` model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B, e.g., [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B). Configuration objects that inherit from [`Gemma3nAudioConfig`] and can be used to control the model outputs. Read the documentation from [`Gemma3nAudioConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128): Vocabulary size of the additional hard-token embeddings for audio model. These augment the embeddings included in the `Gemma3nTextModel` to provide, e.g., the end of audio and audio soft token placeholder tokens when converting `input_ids` to embeddings in the `Gemma3nForConditionalGeneration` model. vocab_offset (`int`, *optional*, defaults to 262272): Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the 0-indexed `Gemma3nMultimodalEmbedder.embedding` table. input_feat_size (`int`, *optional*, defaults to 128): The number of channels in each mel-spectrogram frame. hidden_size (`int`, *optional*, defaults to 1536): Dimension of the hidden representations. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. 
gradient_clipping (`float`, *optional*, defaults to 10000000000.0): Clipping value used to stablize extremely large gradient values. conf_attention_chunk_size (`int`, *optional*, defaults to 12): The sub-sequence size for local attention processing inside the Conformer ("conf") section of the Universal Speech Model. conf_attention_context_left (`int`, *optional*, defaults to 13): The left context size of the local attention inside the Conformer ("conf") section of the Universal Speech Model. conf_attention_context_right (`int`, *optional*, defaults to 0): The right context size of the local attention inside the Conformer ("conf") section of the Universal Speech Model. conf_attention_logit_cap (`float`, *optional*, defaults to 50.0): Logit cap applied during local attention inside the Conformer ("conf") section of the Universal Speech Model. conf_num_attention_heads (`int`, *optional*, defaults to 8): The number of attention heads in local attention inside the Conformer ("conf") section of the Universal Speech Model. conf_num_hidden_layers (`int`, *optional*, defaults to 12): The number of layers that use local attention inside the Conformer ("conf") section of the Universal Speech Model. conf_conv_kernel_size (`int`, *optional*, defaults to 5): Convolution kernel size for the conformer block inside the Conformer ("conf") section of the Universal Speech Model. conf_reduction_factor (`int`, *optional*, defaults to 4): Reduction factor used in the conformer block inside the Conformer ("conf") section of the Universal Speech Model. conf_residual_weight (`float`, *optional*, defaults to 0.5): Residual connection weight inside the Conformer ("conf") section of the Universal Speech Model. sscp_conv_channel_size (`tuple(int, int)`, *optional*, defaults to `(128, 32)`): The channel sizes for the first and second convolutional layers in the Sub-sample Convolution Projection ("sscp") section of the Universal Speech Model. sscp_conv_group_norm_eps (`float`, *optional*, defaults to 0.001): Epsilon used in group normalization in the subsample convolution projection in the Sub-sample Convolution Projection ("sscp") section of the Universal Speech Model. sscp_conv_kernel_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((3, 3), (3, 3))`): Kernel sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample Convolution Projection ("sscp") section of the Universal Speech Model. The kernel sizes are specified as a tuple of height and width for each layer, where the height corresponds to the time dimension and the width corresponds to the frequency dimension. sscp_conv_stride_size (`tuple(tuple(int, int), tuple(int, int))`, *optional*, defaults to `((2, 2), (2, 2))`): Stride sizes of the two convolutional layers in the subsample convolution projection in the Sub-sample Convolution Projection ("sscp") section of the Universal Speech Model. The stride sizes are specified as a tuple of height and width for each layer, where the height corresponds to the time dimension and the width corresponds to the frequency dimension. 
Example: ```python >>> from transformers import Gemma3nAudioConfig, Gemma3nAudioEncoder >>> # Initializing a Gemma3nAudioEncoder gemma3n_audio-E4B-style configuration >>> configuration = Gemma3nAudioConfig() >>> # Initializing a model from the gemma3n_audio-E4B style configuration >>> model = Gemma3nAudioEncoder(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "gemma3n_audio" def __init__( self, vocab_size: int = 128, vocab_offset: int = 262_144 + 128, # text vocab size + vision vocab size input_feat_size: int = 128, hidden_size: int = 1536, rms_norm_eps: float = 1e-6, gradient_clipping: float = 10_000_000_000.0, conf_attention_chunk_size: int = 12, conf_attention_context_left: int = 13, conf_attention_context_right: int = 0, conf_attention_logit_cap: float = 50.0, conf_num_attention_heads: int = 8, conf_num_hidden_layers: int = 12, conf_conv_kernel_size: int = 5, conf_reduction_factor: int = 4, conf_residual_weight: float = 0.5, sscp_conv_channel_size: tuple[int, int] = (128, 32), sscp_conv_group_norm_eps: float = 1e-3, sscp_conv_kernel_size: tuple[tuple[int, int], tuple[int, int]] = ( (3, 3), (3, 3), ), sscp_conv_stride_size: tuple[tuple[int, int], tuple[int, int]] = ( (2, 2), (2, 2), ), **kwargs, ): super().__init__(**kwargs) self.input_feat_size = input_feat_size self.hidden_size = hidden_size self.rms_norm_eps = rms_norm_eps self.vocab_size = vocab_size self.vocab_offset = vocab_offset self.gradient_clipping = gradient_clipping self.conf_attention_chunk_size = conf_attention_chunk_size self.conf_attention_context_left = conf_attention_context_left self.conf_attention_context_right = conf_attention_context_right self.conf_attention_logit_cap = conf_attention_logit_cap self.conf_num_attention_heads = conf_num_attention_heads self.conf_num_hidden_layers = conf_num_hidden_layers self.conf_conv_kernel_size = conf_conv_kernel_size self.conf_reduction_factor = conf_reduction_factor self.conf_residual_weight = conf_residual_weight self.sscp_conv_channel_size = sscp_conv_channel_size self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps self.sscp_conv_kernel_size = sscp_conv_kernel_size self.sscp_conv_stride_size = sscp_conv_stride_size class Gemma3nVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmWrapper`]. It is used to instantiate an timm model model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Gemma 3n E4B vision tower, e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B). Configuration objects inherit from [`Gemma3nVisionConfig`] and can be used to control the model outputs. Read the documentation from [`Gemma3nVisionConfig`] for more information. Config loads imagenet label descriptions and stores them in `id2label` attribute, `label2id` attribute for default imagenet models is set to `None` due to occlusions in the label descriptions. Args: initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. do_pooling (`bool`, *optional*, defaults to `False`): Whether to do pooling for the last_hidden_state in `TimmWrapper` or not. architecture (`str`, *optional*, defaults to `"mobilenetv5_300m_enc"`): Determines vision architecture for TimmWrapper. 
hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden representations. vocab_size (`int`, *optional*, defaults to 128): Vocabulary size of the additional hard-token embeddings for vision model. vocab_offset (`int`, *optional*, defaults to 262144): Offset between the tokenizer vocab index for the token ids embedded by `Gemma3nMultimodalEmbedder` and the 0-indexed `Gemma3nMultimodalEmbedder.embedding` table. rms_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the rms normalization layers. Example: ```python >>> from transformers import Gemma3nVisionConfig, TimmWrapper >>> # Initializing a TimmWrapper gemma3n_vision-E4B-style configuration >>> configuration = Gemma3nVisionConfig() >>> # Initializing a gemma3n_vision-E4B-style TimmWrapper from the configuration >>> model = TimmWrapper(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "gemma3n_vision" def __init__( self, initializer_range: float = 0.02, do_pooling: bool = False, architecture: str = "mobilenetv5_300m_enc", hidden_size: int = 2048, vocab_size: int = 128, vocab_offset: int = 262_144, rms_norm_eps: float = 1e-06, model_args: Optional[dict] = None, **kwargs, ): super().__init__(**kwargs) self.initializer_range = initializer_range self.do_pooling = do_pooling self.model_args = model_args # named "model_args" for BC with timm self.architecture = architecture self.hidden_size = hidden_size self.vocab_size = vocab_size self.vocab_offset = vocab_offset self.rms_norm_eps = rms_norm_eps @classmethod def from_dict(cls, config_dict: dict[str, Any], **kwargs): label_names = config_dict.get("label_names") is_custom_model = "num_labels" in kwargs or "id2label" in kwargs # if no labels added to config, use imagenet labeller in timm if label_names is None and not is_custom_model: requires_backends(cls, ["timm"]) imagenet_subset = infer_imagenet_subset(config_dict) if imagenet_subset: dataset_info = ImageNetInfo(imagenet_subset) synsets = dataset_info.label_names() label_descriptions = dataset_info.label_descriptions(as_dict=True) label_names = [label_descriptions[synset] for synset in synsets] if label_names is not None and not is_custom_model: kwargs["id2label"] = dict(enumerate(label_names)) # if all label names are unique, create label2id mapping as well if len(set(label_names)) == len(label_names): kwargs["label2id"] = {name: i for i, name in enumerate(label_names)} else: kwargs["label2id"] = None # timm config stores the `num_classes` attribute in both the root of config and in the "pretrained_cfg" dict. 
# We are removing these attributes in order to have the native `transformers` num_labels attribute in config # and to avoid duplicate attributes num_labels_in_kwargs = kwargs.pop("num_labels", None) num_labels_in_dict = config_dict.pop("num_classes", None) # passed num_labels has priority over num_classes in config_dict kwargs["num_labels"] = num_labels_in_kwargs or num_labels_in_dict # pop num_classes from "pretrained_cfg", # it is not necessary to have it, only root one is used in timm if "pretrained_cfg" in config_dict and "num_classes" in config_dict["pretrained_cfg"]: config_dict["pretrained_cfg"].pop("num_classes", None) return super().from_dict(config_dict, **kwargs) def to_dict(self) -> dict[str, Any]: output = super().to_dict() output["num_classes"] = self.num_labels output["label_names"] = list(self.id2label.values()) output.pop("id2label", None) output.pop("label2id", None) return output class Gemma3nConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`Gemma3nForConditionalGeneration`]. It is used to instantiate a Gemma3nForConditionalGeneration according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Gemma3n-E4B. e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`Union[Gemma3nTextConfig, dict]`, *optional*): The config object of the text backbone. vision_config (`Union[AutoConfig, dict]`, *optional*): Custom vision config or dict. audio_config (`Union[AutoConfig, dict]`, *optional*): Custom audio config or dict. audio_soft_tokens_per_image (`int`, *optional*, defaults to 188): The number of soft tokens per audio clip. vision_soft_tokens_per_image (`int`, *optional*, defaults to 256): The number of soft tokens per image. boi_token_id (`int`, *optional*, defaults to 255999): The begin-of-image token index to wrap the image prompt. eoi_token_id (`int`, *optional*, defaults to 262144): The end-of-image token index to wrap the image prompt. image_token_id (`int`, *optional*, defaults to 262145): The image token index to encode the image prompt. boa_token_id (`int`, *optional*, defaults to 256000): The begin-of-audio token index to wrap the audio prompt. eoa_token_id (`int`, *optional*, defaults to 262272): The end-of-audio token index to wrap the audio prompt. audio_token_id (`int`, *optional*, defaults to 262273): The audio token index to encode the audio prompt. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
    Example:

    ```python
    >>> from transformers import Gemma3nAudioConfig, Gemma3nConfig, Gemma3nForConditionalGeneration, Gemma3nTextConfig, Gemma3nVisionConfig

    >>> # Initializing a MobileNet vision config, which is loaded from TIMM
    >>> vision_config = Gemma3nVisionConfig()

    >>> # Initializing a Gemma3n Audio config
    >>> audio_config = Gemma3nAudioConfig()

    >>> # Initializing a Gemma3n Text config
    >>> text_config = Gemma3nTextConfig()

    >>> # Initializing a Gemma3n E4B style configuration
    >>> configuration = Gemma3nConfig(text_config, vision_config, audio_config)

    >>> # Initializing a model (with random weights) from the E4B style configuration
    >>> model = Gemma3nForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "gemma3n"
    sub_configs = {
        "text_config": Gemma3nTextConfig,
        "vision_config": Gemma3nVisionConfig,
        "audio_config": Gemma3nAudioConfig,
    }

    def __init__(
        self,
        text_config: Optional[Union[Gemma3nTextConfig, dict[str, Any]]] = None,
        vision_config: Optional[Union[Gemma3nVisionConfig, dict[str, Any]]] = None,
        audio_config: Optional[Union[Gemma3nAudioConfig, dict[str, Any]]] = None,
        audio_soft_tokens_per_image: int = 188,
        vision_soft_tokens_per_image: int = 256,
        boi_token_id: int = 255_999,
        eoi_token_id: int = 262_144,
        image_token_id: int = 262_145,
        boa_token_id: int = 256_000,
        eoa_token_id: int = 262_272,
        audio_token_id: int = 262_273,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if isinstance(text_config, dict):
            text_config = Gemma3nTextConfig(**text_config)
        elif text_config is None:
            text_config = Gemma3nTextConfig()
            logger.info("text_config is None. Using default Gemma3nTextConfig.")

        if isinstance(vision_config, dict):
            vision_config = Gemma3nVisionConfig(**vision_config)
        elif vision_config is None:
            vision_config = Gemma3nVisionConfig()
            logger.info("vision_config is None. Using default Gemma3nVisionConfig.")

        if isinstance(audio_config, dict):
            audio_config = Gemma3nAudioConfig(**audio_config)
        elif audio_config is None:
            audio_config = Gemma3nAudioConfig()
            logger.info("audio_config is None. Using default Gemma3nAudioConfig.")

        self.text_config = text_config
        self.vision_config = vision_config
        self.audio_config = audio_config
        self.audio_soft_tokens_per_image = audio_soft_tokens_per_image
        self.vision_soft_tokens_per_image = vision_soft_tokens_per_image
        self.boi_token_id = boi_token_id
        self.eoi_token_id = eoi_token_id
        self.image_token_id = image_token_id
        self.boa_token_id = boa_token_id
        self.eoa_token_id = eoa_token_id
        self.audio_token_id = audio_token_id
        self.initializer_range = initializer_range


__all__ = ["Gemma3nAudioConfig", "Gemma3nConfig", "Gemma3nTextConfig", "Gemma3nVisionConfig"]
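A small sketch of how `Gemma3nTextConfig` fills in its per-layer defaults, following the `__init__` logic in the file above. The values are illustrative rather than recommended settings, and the snippet assumes a transformers build that exports `Gemma3nTextConfig`.

```python
from transformers import Gemma3nTextConfig

# Ten layers; activation_sparsity_pattern=None expands to 0.0 for every layer,
# which satisfies the per-layer length check in __init__.
config = Gemma3nTextConfig(num_hidden_layers=10, activation_sparsity_pattern=None)

# With layer_types unset, every fifth layer is "full_attention" and the rest are
# "sliding_attention" (the 4 local : 1 global pattern described in the docstring).
assert config.layer_types[4] == "full_attention"
assert config.layer_types[0] == "sliding_attention"

# A scalar intermediate_size is expanded to one value per layer.
assert config.intermediate_size == [16384] * 10
```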
transformers/src/transformers/models/gemma3n/configuration_gemma3n.py/0
{ "file_path": "transformers/src/transformers/models/gemma3n/configuration_gemma3n.py", "repo_id": "transformers", "token_count": 14583 }
498
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/glm4v/modular_glm4v.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_glm4v.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 The ZhipuAI Inc. team and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...video_utils import VideoInput class Glm4vVideosProcessorKwargs(VideosKwargs, total=False): fps: Union[list[float], float] class Glm4vImagesKwargs(ImagesKwargs): patch_size: Optional[int] temporal_patch_size: Optional[int] merge_size: Optional[int] class Glm4vProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Glm4vImagesKwargs _defaults = { "text_kwargs": { "padding": False, "return_mm_token_type_ids": False, }, } videos_kwargs: Glm4vVideosProcessorKwargs class Glm4vProcessor(ProcessorMixin): r""" Constructs a GLM-4V processor which wraps a GLM-4V image processor and a GLM-4 tokenizer into a single processor. [`~Glm4vProcessor.__call__`] and [`~Glm4vProcessor.decode`] for more information. Args: image_processor ([`Glm4vProcessor`], *optional*): The image processor is a required input. tokenizer ([`PreTrainedTokenizerFast`], *optional*): The tokenizer is a required input. video_processor ([`Glm4vVideoProcessor`], *optional*): The video processor is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. 
""" attributes = ["image_processor", "tokenizer", "video_processor"] image_processor_class = "AutoImageProcessor" video_processor_class = "AutoVideoProcessor" tokenizer_class = ("PreTrainedTokenizer", "PreTrainedTokenizerFast") def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs): super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template) self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token self.image_token_id = ( tokenizer.image_token_id if getattr(tokenizer, "image_token_id", None) else tokenizer.convert_tokens_to_ids(self.image_token) ) self.video_token_id = ( tokenizer.video_token_id if getattr(tokenizer, "video_token_id", None) else tokenizer.convert_tokens_to_ids(self.video_token) ) def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, videos: VideoInput = None, **kwargs: Unpack[Glm4vProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode the text. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`): The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`. - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`. - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`. 
""" output_kwargs = self._merge_kwargs( Glm4vProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if images is not None: image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"]) image_grid_thw = image_inputs["image_grid_thw"] else: image_inputs = {} image_grid_thw = None if videos is not None: videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"]) timestamps = videos_inputs.pop("timestamps") video_grid_thw = videos_inputs["video_grid_thw"] else: videos_inputs = {} timestamps = [] video_grid_thw = None if not isinstance(text, list): text = [text] text = text.copy() # below lines change text in-place if image_grid_thw is not None: merge_length = self.image_processor.merge_size**2 index = 0 for i in range(len(text)): while self.image_token in text[i]: num_image_tokens = image_grid_thw[index].prod() // merge_length text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1) index += 1 text[i] = text[i].replace("<|placeholder|>", self.image_token) if video_grid_thw is not None: merge_length = self.video_processor.merge_size**2 video_index = 0 for i in range(len(text)): while self.video_token in text[i]: num_frames = video_grid_thw[video_index][0] video_structure = "" if hasattr(timestamps, "tolist"): timestamps_list = timestamps.tolist()[0] else: timestamps_list = timestamps[0] if isinstance(timestamps[0], list) else timestamps unique_timestamps = [] for idx in range(0, len(timestamps_list)): unique_timestamps.append(timestamps_list[idx]) selected_timestamps = unique_timestamps[:num_frames] while len(selected_timestamps) < num_frames: selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0) for frame_idx in range(num_frames): timestamp_sec = selected_timestamps[frame_idx] frame_structure = f"<|begin_of_image|>{self.image_token}<|end_of_image|>{timestamp_sec}" video_structure += frame_structure text[i] = text[i].replace(self.video_token, video_structure, 1) num_image_tokens = ( video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0] ) for frame_idx in range(num_frames): if self.image_token in text[i]: text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1) video_index += 1 text[i] = text[i].replace("<|placeholder|>", self.image_token) return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False) text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"]) if return_mm_token_type_ids: array_ids = np.array(text_inputs["input_ids"]) mm_token_type_ids = np.zeros_like(text_inputs["input_ids"]) mm_token_type_ids[array_ids == self.image_token_id] = 1 text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist() return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (height, width) per each image. video_sizes (`list[list[int]]`, *optional*): The input sizes formatted as (num_frames, height, width) per each video. 
Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: images_kwargs = Glm4vProcessorKwargs._defaults.get("images_kwargs", {}) images_kwargs.update(kwargs) merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size num_image_patches = [ self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes ] num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches] vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches}) if video_sizes is not None: videos_kwargs = Glm4vProcessorKwargs._defaults.get("videos_kwargs", {}) videos_kwargs.update(kwargs) num_video_patches = [ self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) for video_size in video_sizes ] num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches] vision_data["num_video_tokens"] = num_video_tokens return MultiModalData(**vision_data) def post_process_image_text_to_text( self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs ): """ Post-process the output of the model to decode the text. Args: generated_outputs (`torch.Tensor` or `np.ndarray`): The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)` or `(sequence_length,)`. skip_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method. clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method. **kwargs: Additional arguments to be passed to the tokenizer's `batch_decode method`. Returns: `list[str]`: The decoded text. """ return self.tokenizer.batch_decode( generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, ) __all__ = ["Glm4vProcessor"]
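The image placeholder expansion in `Glm4vProcessor.__call__` above reduces to a small piece of grid arithmetic. The standalone sketch below reproduces that arithmetic only; the merge size and grid values are made up for illustration.

```python
import numpy as np

merge_size = 2                             # stands in for image_processor.merge_size
image_grid_thw = np.array([[1, 20, 20]])   # (temporal, height, width) patch grid for one image

merge_length = merge_size**2
num_image_tokens = image_grid_thw[0].prod() // merge_length
# 1 * 20 * 20 patches, merged 2x2 -> 100 tokens, so one "<|image|>" in the prompt
# is replaced by 100 image token placeholders.
assert num_image_tokens == 100
```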
transformers/src/transformers/models/glm4v/processing_glm4v.py/0
{ "file_path": "transformers/src/transformers/models/glm4v/processing_glm4v.py", "repo_id": "transformers", "token_count": 6546 }
499
# coding=utf-8 # Copyright 2023 The Bigcode team and HuggingFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch GPTBigCode model.""" import math from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import is_flash_attn_available from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...utils import ( auto_docstring, can_return_tuple, logging, ) from .configuration_gpt_bigcode import GPTBigCodeConfig if is_flash_attn_available(): pass logger = logging.get_logger(__name__) # Fused kernels # Use separate functions for each case because conditionals prevent kernel fusion. # TODO: Could have better fused kernels depending on scaling, dropout and head mask. # Is it doable without writing 32 functions? @torch.jit.script def upcast_masked_softmax( x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor, scale: float, softmax_dtype: torch.dtype ): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def upcast_softmax(x: torch.Tensor, scale: float, softmax_dtype: torch.dtype): input_dtype = x.dtype x = x.to(softmax_dtype) * scale x = torch.nn.functional.softmax(x, dim=-1).to(input_dtype) return x @torch.jit.script def masked_softmax(x: torch.Tensor, mask: torch.Tensor, mask_value: torch.Tensor): x = torch.where(mask, x, mask_value) x = torch.nn.functional.softmax(x, dim=-1) return x def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class GPTBigCodeAttention(nn.Module): def __init__(self, config, is_cross_attention=False, layer_idx=None): super().__init__() self.config = config self.mask_value = None self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads self.kv_heads = 1 if self.multi_query else self.num_heads self.kv_dim = self.kv_heads * self.head_dim self.num_key_value_groups = self.num_heads // self.kv_heads self.split_size = self.embed_dim self.is_causal = True if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale_attn_weights = config.scale_attn_weights self.scaling = self.head_dim**-0.5 if config.scale_attn_weights else 1.0 self.is_cross_attention = is_cross_attention self.layer_idx = layer_idx self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32 self.scale_attention_softmax_in_fp32 = ( config.scale_attention_softmax_in_fp32 and config.attention_softmax_in_fp32 ) self.attn_pdrop = config.attn_pdrop if self.is_cross_attention: if self.multi_query: raise NotImplementedError("Multi-Query Attention not supported for cross_attention") self.c_attn = nn.Linear(self.embed_dim, 2 * self.embed_dim) self.q_attn = nn.Linear(self.embed_dim, self.embed_dim) else: self.c_attn = nn.Linear(self.embed_dim, self.embed_dim + 2 * self.kv_dim) self.c_proj = nn.Linear(self.embed_dim, self.embed_dim) self.attn_dropout = config.attn_pdrop self.resid_dropout = nn.Dropout(config.resid_pdrop) def forward( self, hidden_states: torch.Tensor, layer_past: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[ tuple[torch.Tensor, Optional[torch.Tensor]], tuple[torch.Tensor, Optional[torch.Tensor], tuple[torch.Tensor, ...]], ]: input_shape = hidden_states.shape[:-1] if layer_past is not None: if isinstance(layer_past, EncoderDecoderCache): is_updated = layer_past.is_updated.get(self.layer_idx) if self.is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = layer_past.cross_attention_cache else: curr_past_key_value = layer_past.self_attention_cache else: curr_past_key_value = layer_past if self.is_cross_attention: if not hasattr(self, "q_attn") or not self.is_cross_attention: raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPTBigCodeAttention(..., is_cross_attention=True)`." 
) if layer_past is not None and is_updated: # reuse k,v, cross_attentions key = curr_past_key_value.layers[self.layer_idx].keys value = curr_past_key_value.layers[self.layer_idx].values else: query = self.q_attn(hidden_states).view(*input_shape, -1, self.head_dim).transpose(1, 2) key, value = self.c_attn(encoder_hidden_states).split((self.head_dim, self.head_dim), dim=-1) else: if self.multi_query: query, key, value = ( self.c_attn(hidden_states).unsqueeze(1).split((self.embed_dim, self.kv_dim, self.kv_dim), dim=3) ) query = query.view(*input_shape, -1, self.head_dim).transpose(1, 2) else: query, key, value = ( self.c_attn(hidden_states) .view(*hidden_states.shape[:2], self.num_heads, 3 * self.head_dim) .transpose(1, 2) .split(3 * [self.head_dim], dim=3) ) if layer_past is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not self.is_cross_attention else None key, value = curr_past_key_value.update(key, value, self.layer_idx, {"cache_position": cache_position}) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if self.is_cross_attention: layer_past.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query, key, value, attention_mask, dropout=0.0 if not self.training else self.attn_dropout, scaling=self.scaling, head_mask=head_mask, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.c_proj(attn_output) attn_output = self.resid_dropout(attn_output) return attn_output, attn_weights class GPTBigCodeMLP(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = nn.Linear(embed_dim, intermediate_size) self.c_proj = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) # Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP.forward def forward(self, hidden_states: Optional[tuple[torch.FloatTensor]]) -> torch.FloatTensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class GPTBigCodeBlock(nn.Module): def __init__(self, config, layer_idx=None): super().__init__() hidden_size = config.hidden_size self.inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.attn = GPTBigCodeAttention(config, layer_idx=layer_idx) self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) if config.add_cross_attention: if config.multi_query: raise NotImplementedError("Cross-attention not implemented for MQA") self.crossattention = GPTBigCodeAttention(config, is_cross_attention=True, layer_idx=layer_idx) self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigCodeMLP(self.inner_dim, config) def forward( self, hidden_states: Optional[tuple[torch.Tensor]], layer_past: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: 
Optional[bool] = False, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[ tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor] ]: residual = hidden_states hidden_states = self.ln_1(hidden_states) attn_outputs = self.attn( hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) attn_output = attn_outputs[0] # output_attn: a, present, (attentions) outputs = attn_outputs[1:] # residual connection hidden_states = attn_output + residual if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) residual = hidden_states hidden_states = self.ln_cross_attn(hidden_states) cross_attn_outputs = self.crossattention( hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) attn_output = cross_attn_outputs[0] # residual connection hidden_states = residual + attn_output outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights residual = hidden_states hidden_states = self.ln_2(hidden_states) feed_forward_hidden_states = self.mlp(hidden_states) hidden_states = residual + feed_forward_hidden_states return (hidden_states,) + outputs @auto_docstring class GPTBigCodePreTrainedModel(PreTrainedModel): config: GPTBigCodeConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["GPTBigCodeBlock"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (GPTBigCodeMLP, GPTBigCodeAttention)): # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. 
# > -- GPT-2 :: https://openai.com/blog/better-language-models/ # # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py module.c_proj.weight.data.normal_( mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)) ) module.c_proj._is_hf_initialized = True elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class GPTBigCodeModel(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.multi_query = config.multi_query self.embed_dim = config.hidden_size self.wte = nn.Embedding(config.vocab_size, self.embed_dim) self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([GPTBigCodeBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) max_positions = config.max_position_embeddings self.register_buffer( "bias", torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)), persistent=False ) self.gradient_checkpointing = False self._use_sdpa = config._attn_implementation == "sdpa" self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, position_ids=position_ids, past_key_values=past_key_values, ) if self._use_flash_attention_2: encoder_attention_mask = ( encoder_attention_mask.bool() if (encoder_attention_mask is not None and 0 in encoder_attention_mask) else None ) else: # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if ( self.config.add_cross_attention and encoder_hidden_states is not None and encoder_attention_mask is not None ): if encoder_attention_mask.dim() == 2: encoder_attention_mask.unsqueeze(1) assert encoder_attention_mask.dim() == 3 encoder_attention_mask = encoder_attention_mask.bool().unsqueeze(2 if self.multi_query else 1) else: encoder_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = 
() if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block( hidden_states, past_key_values, causal_mask, head_mask[i], encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[2],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) @auto_docstring( custom_intro=""" The GPT_BIGCODE Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """ ) class GPTBigCodeForCausalLM(GPTBigCodePreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.transformer = GPTBigCodeModel(config) self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size, input_ids_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) @auto_docstring( custom_intro=""" The GPTBigCode Model transformer with a sequence classification head on top (linear layer). [`GPTBigCodeForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class GPTBigCodeForSequenceClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring class GPTBigCodeForTokenClassification(GPTBigCodePreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = GPTBigCodeModel(config) if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: classifier_dropout = config.classifier_dropout elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: classifier_dropout = config.hidden_dropout else: classifier_dropout = 0.1 self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device)) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) __all__ = [ "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ]
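if __name__ == "__main__":
    # Minimal usage sketch for the causal-LM head defined above. The tiny
    # hyperparameters are arbitrary toy values so the sketch runs offline with
    # randomly initialized weights; any real checkpoint can be loaded with
    # `GPTBigCodeForCausalLM.from_pretrained(...)` instead.
    config = GPTBigCodeConfig(vocab_size=128, n_positions=64, n_embd=32, n_layer=2, n_head=4)
    model = GPTBigCodeForCausalLM(config)

    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    # `use_cache=True` exercises the `past_key_values` / `Cache` path above.
    generated = model.generate(input_ids, max_new_tokens=4, do_sample=False, use_cache=True)
    print(generated.shape)  # (1, 12)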
transformers/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py/0
{ "file_path": "transformers/src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py", "repo_id": "transformers", "token_count": 17995 }
500
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""openai model configuration"""

from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...modeling_rope_utils import rope_config_validation


class GptOssConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`GptOssModel`]. It is used to instantiate a
    GPT-OSS model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the
    [openai/gpt-oss-120b](https://huggingface.co/openai/gpt-oss-120b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    """

    model_type = "gpt_oss"
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.self_attn.sinks": "local_rowwise",
        "layers.*.mlp.experts": "gather",
        "layers.*.mlp.router": "ep_router",
        "layers.*.mlp.experts.gate_up_proj": "grouped_gemm",
        "layers.*.mlp.experts.gate_up_proj_bias": "grouped_gemm",
        "layers.*.mlp.experts.down_proj": "grouped_gemm",
        "layers.*.mlp.experts.down_proj_bias": "grouped_gemm",
    }

    def __init__(
        self,
        num_hidden_layers: int = 36,
        num_local_experts: int = 128,
        vocab_size: int = 201088,
        hidden_size: int = 2880,
        intermediate_size: int = 2880,
        head_dim: int = 64,
        num_attention_heads: int = 64,
        num_key_value_heads: int = 8,
        sliding_window: int = 128,
        rope_theta: float = 150000.0,
        tie_word_embeddings=False,
        hidden_act: str = "silu",
        initializer_range: float = 0.02,
        max_position_embeddings=131072,
        rms_norm_eps: float = 1e-5,
        rope_scaling={"rope_type": "yarn", "factor": 32.0, "beta_fast": 32.0, "beta_slow": 1.0, "truncate": False},
        attention_dropout: float = 0.0,
        num_experts_per_tok=4,
        router_aux_loss_coef: float = 0.9,
        output_router_logits=False,
        use_cache=True,
        layer_types=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_local_experts = num_local_experts
        self.sliding_window = sliding_window
        self.num_experts_per_tok = num_experts_per_tok
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
        self.layer_types = layer_types
        if self.layer_types is None:
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types)

        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, copy it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)
        self.attention_bias = True
        self.max_position_embeddings = max_position_embeddings
        self.router_aux_loss_coef = router_aux_loss_coef
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["GptOssConfig"]
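if __name__ == "__main__":
    # Small sketch of the defaults above; nothing is downloaded and no weights
    # are created, only the configuration class defined in this file is used.
    config = GptOssConfig()
    # Layers alternate between sliding and full attention, starting with
    # sliding attention (see the `layer_types` default in `__init__`).
    print(config.num_hidden_layers, config.layer_types[:4])
    # The YaRN rope scaling dict and the explicit head dimension are kept as passed.
    print(config.head_dim, config.rope_scaling["rope_type"])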
transformers/src/transformers/models/gpt_oss/configuration_gpt_oss.py/0
{ "file_path": "transformers/src/transformers/models/gpt_oss/configuration_gpt_oss.py", "repo_id": "transformers", "token_count": 2089 }
501
# coding=utf-8 # Copyright 2024 The Kyutai and HuggingFace Inc. teams. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Optional import torch import torch.nn as nn import torch.utils.checkpoint from ...utils import logging from ..gemma.modeling_gemma import GemmaForCausalLM, GemmaForSequenceClassification, GemmaForTokenClassification from ..granite.modeling_granite import GraniteAttention from ..llama.modeling_llama import LlamaDecoderLayer, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRotaryEmbedding from .configuration_helium import HeliumConfig logger = logging.get_logger(__name__) class HeliumRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return (self.weight.to(torch.float32) * hidden_states).to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class HeliumRotaryEmbedding(LlamaRotaryEmbedding): pass class HeliumMLP(LlamaMLP): pass def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., 0::2] x2 = x[..., 1::2] return torch.stack((-x2, x1), dim=-1).flatten(-2) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) # Interleave them instead of usual shape cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1) sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class HeliumAttention(GraniteAttention): def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None): super().__init__(config, layer_idx) self.o_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.scaling = 1 / math.sqrt(self.head_dim) class HeliumDecoderLayer(LlamaDecoderLayer): def __init__(self, config: HeliumConfig, layer_idx: Optional[int] = None): super().__init__() self.mlp = HeliumMLP(config) self.input_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps) class HeliumPreTrainedModel(LlamaPreTrainedModel): pass class HeliumModel(HeliumPreTrainedModel, LlamaModel): def __init__(self, config: HeliumConfig): super().__init__(config) self.layers = nn.ModuleList( [HeliumDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = HeliumRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = HeliumRotaryEmbedding(config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() class HeliumForCausalLM(GemmaForCausalLM): pass class HeliumForSequenceClassification(GemmaForSequenceClassification): pass class HeliumForTokenClassification(GemmaForTokenClassification): pass __all__ = [ "HeliumPreTrainedModel", "HeliumModel", "HeliumForCausalLM", "HeliumForSequenceClassification", "HeliumForTokenClassification", ]
transformers/src/transformers/models/helium/modular_helium.py/0
{ "file_path": "transformers/src/transformers/models/helium/modular_helium.py", "repo_id": "transformers", "token_count": 2128 }
502
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Hubert checkpoint."""

import argparse

import torch

from transformers import HubertConfig, HubertForSequenceClassification, Wav2Vec2FeatureExtractor, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SUPPORTED_MODELS = ["UtteranceLevel"]


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
    if checkpoint["Config"]["downstream_expert"]["modelrc"]["select"] not in SUPPORTED_MODELS:
        raise NotImplementedError(f"The supported s3prl models are {SUPPORTED_MODELS}")

    downstream_dict = checkpoint["Downstream"]

    hf_config = HubertConfig.from_pretrained(config_path)
    hf_model = HubertForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_model.projector.weight.data = downstream_dict["projector.weight"]
    hf_model.projector.bias.data = downstream_dict["projector.bias"]
    hf_model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    hf_model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
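# Example invocation (the paths below are placeholders, and "facebook/hubert-base-ls960"
# is only one plausible upstream base model for s3prl utterance-level heads):
#
#   python convert_hubert_original_s3prl_checkpoint_to_pytorch.py \
#       --base_model_name facebook/hubert-base-ls960 \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_utterance_level.ckpt \
#       --model_dump_path ./converted_hubert_classifier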
transformers/src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/hubert/convert_hubert_original_s3prl_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 988 }
503
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for IDEFICS2. """ from itertools import accumulate from typing import TYPE_CHECKING, Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, is_valid_image, load_image from ...processing_utils import ( ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack, ) from ...tokenization_utils_base import AddedToken, TextInput from ...utils import logging if TYPE_CHECKING: from ...tokenization_utils_base import PreTokenizedInput logger = logging.get_logger(__name__) def is_url(val) -> bool: return isinstance(val, str) and val.startswith("http") def is_image_or_image_url(elem): return is_url(elem) or is_valid_image(elem) class Idefics2ImagesKwargs(ImagesKwargs, total=False): image_seq_len: Optional[int] class Idefics2ProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Idefics2ImagesKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "is_split_into_words": False, }, "images_kwargs": {}, } class Idefics2Processor(ProcessorMixin): r""" Constructs a IDEFICS2 processor which wraps a LLama tokenizer and IDEFICS2 image processor into a single processor. [`IdeficsProcessor`] offers all the functionalities of [`Idefics2ImageProcessor`] and [`LlamaTokenizerFast`]. See the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information. Args: image_processor (`Idefics2ImageProcessor`): An instance of [`Idefics2ImageProcessor`]. The image processor is a required input. tokenizer (`PreTrainedTokenizerBase`, *optional*): An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input. image_seq_len (`int`, *optional*, defaults to 64): The length of the image sequence i.e. the number of <image> tokens per image in the input. This parameter is used to build the string from the input prompt and image tokens and should match the config.perceiver_config.resampler_n_latents value for the model used. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. 
""" attributes = ["image_processor", "tokenizer"] image_processor_class = "Idefics2ImageProcessor" tokenizer_class = "AutoTokenizer" def __init__( self, image_processor, tokenizer=None, image_seq_len: int = 64, chat_template: Optional[str] = None, **kwargs ): if image_processor is None: raise ValueError("You need to specify an `image_processor`.") if tokenizer is None: raise ValueError("You need to specify a `tokenizer`.") if not hasattr(tokenizer, "image_token"): self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True).content self.image_token = AddedToken("<image>", normalized=False, special=True).content tokens_to_add = {"additional_special_tokens": [self.fake_image_token, self.image_token]} tokenizer.add_special_tokens(tokens_to_add) self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token) else: self.fake_image_token = tokenizer.image_boundary_token self.image_token = tokenizer.image_token self.image_token_id = tokenizer.image_token_id self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True) tokenizer.add_special_tokens({"additional_special_tokens": [self.end_of_utterance_token]}) self.image_seq_len = image_seq_len super().__init__(image_processor, tokenizer, chat_template=chat_template) def _extract_images_from_prompts(self, prompts): prompt_images = [] for prompt in prompts: images = [] for elem in prompt: if is_valid_image(elem): images.append(elem) elif is_url(elem): images.append(load_image(elem)) prompt_images.append(images) return prompt_images def __call__( self, images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]] = None, text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None, audio=None, videos=None, **kwargs: Unpack[Idefics2ProcessorKwargs], ) -> BatchFeature: """ Processes the input prompts and returns a BatchEncoding. Example: ```python >>> import requests >>> from transformers import Idefics2Processor >>> from transformers.image_utils import load_image >>> processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b", image_seq_len=2) >>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example >>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" >>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg" >>> image1, image2 = load_image(url1), load_image(url2) >>> images = [[image1], [image2]] >>> text = [ ... "<image>In this image, we see", ... "bla bla bla<image>", ... ] >>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True) >>> input_ids = outputs.input_ids >>> input_tokens = processor.tokenizer.batch_decode(input_ids) >>> print(input_tokens) ['<s><fake_token_around_image><image><image><fake_token_around_image> In this image, we see', '<s> bla bla bla<fake_token_around_image><image><image><fake_token_around_image>'] ``` Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1. text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*): The sequence or batch of sequences to be encoded. 
Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Wherever an image token, `<image>` is encountered it is expanded to `<fake_token_around_image>` + `<image>` * `image_seq_len` * <fake_token_around_image>`. return_tensors (`Union[str, TensorType]`, *optional*): If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more information. """ if text is None and images is None: raise ValueError("You must provide either `text` or `images`.") output_kwargs = self._merge_kwargs( Idefics2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) image_seq_len = output_kwargs["images_kwargs"].pop("image_seq_len", None) image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None) n_images_in_text = [] inputs = {} if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError("Invalid input text. Please provide a string, or a list of strings") # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len` fake_image_token = self.fake_image_token image_token = self.image_token image_str = f"{fake_image_token}{image_token * image_seq_len}{fake_image_token}" if self.image_processor.do_image_splitting: # A single image token is split into 4 patches + 1 original image image_str = image_str * 5 image_seq_len *= 5 prompt_strings = [] for sample in text: n_images_in_text.append(sample.count(image_token)) sample = sample.replace(image_token, image_str) # Remove any double fake tokens if images are adjacent sample = sample.replace(f"{fake_image_token}{fake_image_token}", f"{fake_image_token}") prompt_strings.append(sample) text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"]) self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"]) inputs.update(text_inputs) if images is not None: if is_image_or_image_url(images): images = [[images]] elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]): if text is not None: if sum(n_images_in_text) != len(images): raise ValueError( f"The total number of {image_token} tokens in the prompts should be the same as the number of images passed." f" Found {sum(n_images_in_text)} {image_token} tokens and {len(images)} images." ) # Reorganize the images to match the prompts cumsum_images_in_text = [0] + list(accumulate(n_images_in_text)) images = [ images[cumsum_images_in_text[i] : cumsum_images_in_text[i + 1]] for i in range(len(n_images_in_text)) ] else: images = [images] elif ( not isinstance(images, (list, tuple)) and not isinstance(images[0], (list, tuple)) and not is_image_or_image_url(images[0][0]) ): raise ValueError( "Invalid input images. Please provide a single image or a list of images or a list of list of images." ) n_images_in_images = [len(sample) for sample in images] if text is not None and not n_images_in_images == n_images_in_text: raise ValueError( f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same." 
) # Load images if they are URLs images = [[load_image(im) for im in sample] for sample in images] image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"]) inputs.update(image_inputs) return BatchFeature(inputs, tensor_type=return_tensors) __all__ = ["Idefics2Processor"]
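if __name__ == "__main__":
    # Offline sketch of the prompt expansion performed in `__call__` above,
    # using the default special tokens and `image_seq_len=2` as in the
    # docstring example; no tokenizer or image processor is loaded.
    fake_image_token, image_token = "<fake_token_around_image>", "<image>"
    image_seq_len = 2
    image_str = f"{fake_image_token}{image_token * image_seq_len}{fake_image_token}"
    sample = "<image>In this image, we see".replace(image_token, image_str)
    # Adjacent images would otherwise produce doubled boundary tokens.
    sample = sample.replace(f"{fake_image_token}{fake_image_token}", fake_image_token)
    print(sample)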
transformers/src/transformers/models/idefics2/processing_idefics2.py/0
{ "file_path": "transformers/src/transformers/models/idefics2/processing_idefics2.py", "repo_id": "transformers", "token_count": 5122 }
504
# coding=utf-8 # Copyright 2025 Deepseek AI and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example of run command (run from root): python src/transformers/models/janus/convert_janus_weights_to_hf.py --repo_id deepseek-ai/Janus-Pro-1B --local_dir tmp/hub_code_in --output_dir tmp/hub_code_out --safe_serialization Using provided local directory: tmp/hub_code_in """ import argparse import gc import json import os import re from typing import Optional import torch from accelerate import init_empty_weights from huggingface_hub import snapshot_download from transformers import ( AutoTokenizer, JanusConfig, JanusForConditionalGeneration, JanusVisionConfig, JanusVQVAEConfig, LlamaConfig, ) from transformers.models.janus.image_processing_janus import JanusImageProcessor from transformers.models.janus.processing_janus import JanusProcessor # Mappings MAPPINGS = { # Vision model r"(?<!gen_)vision_model\.vision_tower\.blocks\.(\d+)\.attn": r"model.vision_model.encoder.layers.\1.self_attn", r"(?<!gen_)vision_model.vision_tower.blocks": "model.vision_model.encoder.layers", r"(?<!gen_)vision_model.vision_tower.pos_embed": "model.vision_model.embeddings.position_embedding.weight", r"(?<!gen_)vision_model.vision_tower.patch_embed.proj": "model.vision_model.embeddings.patch_embedding", r"(?<!gen_)vision_model.vision_tower.norm": "model.vision_model.post_layernorm", r"(?P<pre>\b(vision_model|model\.vision_model)\b.*\.)proj(?=\.|\s|$)": r"\g<pre>projection_layer", r"(?P<pre>\b(vision_model|model\.vision_model)\b.*\.)norm(?=\.|\s|$)": r"\g<pre>layer_norm", r"(?P<pre>\b(vision_model|model\.vision_model)\b.*\.)norm1(?=\.|\s|$)": r"\g<pre>layer_norm1", r"(?P<pre>\b(vision_model|model\.vision_model)\b.*\.)norm2(?=\.|\s|$)": r"\g<pre>layer_norm2", r"\bvision_model\.vision_tower\.attn_pool\.[^\s$]*": None, # VQ Model r"gen_vision_model": "model.vqmodel", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)decoder\.conv_blocks(?=\.|\s|$)": r"\g<pre>decoder.up", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)encoder\.conv_blocks(?=\.|\s|$)": r"\g<pre>encoder.down", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)res(?=\.|\s|$)": r"\g<pre>block", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)mid\.0(?=\.|\s|$)": r"\g<pre>mid.block_1", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)mid\.1(?=\.|\s|$)": r"\g<pre>mid.attn_1", r"(?P<pre>\b(gen_vision_model|model\.vqmodel)\b.*\.)mid\.2(?=\.|\s|$)": r"\g<pre>mid.block_2", # Aligner Modules r"(gen_aligner)\.layers\.0": r"model.generation_aligner.fc1", r"(gen_aligner)\.layers\.2": r"model.generation_aligner.hidden_layers.0", r"(?<!gen_)(aligner)\.layers\.0": r"model.aligner.fc1", r"(?<!gen_)(aligner)\.layers\.2": r"model.aligner.hidden_layers.0", "gen_head.output_mlp_projector": "model.generation_head.proj_out", r"(\s|^)gen_embed": r"\1model.generation_embeddings", r"(\s|^)gen_head": r"\1model.generation_head", r"\b(gen_vision_model|model\.vqmodel)\.quantize\.codebook_used": None, # Language model 
r"(\s|^)language_model\.model": r"\1model.language_model", r"\b(model\.language_model|(?<!model\.)language_model)\.lm_head\.weight": "lm_head.weight", } CHAT_TEMPLATE = ( "{%set seps=['\n\n','<\uff5cend\u2581of\u2581sentence\uff5c>']%}" "{%set i=0%}" "{%for message in messages%}" "{%if message['role']|lower=='user'%}" "<|User|>: " "{%elif message['role']|lower=='assistant'%}" "<|Assistant|>:{%if not (loop.last and not add_generation_prompt and message['content'][0]['type']=='text' and message['content'][0]['text']=='')%} {%endif%}" "{%else%}" "{{message['role'].capitalize()}}: " "{%endif%}" "{%for content in message['content']%}" "{%if content['type']=='image'%}" "{%if not loop.first%}{{'\n'}}{%endif%}" "<image_placeholder>" "{%if not loop.last%}{{'\n'}}{%endif%}" "{%elif content['type']=='text'%}" "{%set text=content['text']%}" "{%if loop.first%}{%set text=text.lstrip()%}{%endif%}" "{%if loop.last%}{%set text=text.rstrip()%}{%endif%}" "{%if not loop.first and message['content'][loop.index0-1]['type']=='text'%}" "{{' '+text}}" "{%else%}" "{{text}}" "{%endif%}" "{%endif%}" "{%endfor%}" "{%if not loop.last or add_generation_prompt%}" "{%if message['role']|lower=='user'%}" "{{seps[0]}}" "{%else%}" "{{seps[1]}}" "{%endif%}" "{%endif%}" "{%endfor%}" "{%if add_generation_prompt%}<|Assistant|>:{%endif%}" ) def convert_old_keys_to_new_keys(state_dict): keys_as_text = "\n".join(state_dict.keys()) new_keys_as_text = keys_as_text for old, repl in MAPPINGS.items(): if repl is None: new_keys_as_text = re.sub(old, "", new_keys_as_text) else: new_keys_as_text = re.sub(old, repl, new_keys_as_text) output_dict = dict(zip(keys_as_text.split("\n"), new_keys_as_text.split("\n"))) return output_dict def split_tensor(tensor, key): """Splits a merged tensor (qkv or kv) into separate tensors and creates keys for each part.""" if "qkv" in key: prefix_to_replace = "qkv" num_splits = 3 new_keys = ["q_proj", "k_proj", "v_proj"] elif "kv" in key: prefix_to_replace = "kv" num_splits = 2 new_keys = ["k_proj", "v_proj"] else: raise ValueError(f"Unrecognized tensor type in key: {key}") split_size = tensor.shape[0] // num_splits tensors = torch.split(tensor, split_size, dim=0) return {key.replace(prefix_to_replace, new_keys[i]): tensors[i] for i in range(num_splits)} def convert_state_dict_to_hf(state_dict): """Convert state dict keys to HF format.""" conversion_dict = convert_old_keys_to_new_keys(state_dict) converted_state_dict = {} for old_key, new_key in conversion_dict.items(): if new_key: if "qkv" in new_key or "kv" in new_key: # Detect merged attention keys and split them. qkv_split_dict = split_tensor(state_dict[old_key], new_key) converted_state_dict.update(qkv_split_dict) else: converted_state_dict[new_key] = state_dict[old_key] # Embeddings will not have initial dimension pos_embed_key = "model.vision_model.embeddings.position_embedding.weight" converted_state_dict[pos_embed_key] = converted_state_dict[pos_embed_key].squeeze(0) return converted_state_dict def ensure_model_downloaded( repo_id: Optional[str] = None, revision: Optional[str] = None, local_dir: Optional[str] = None ) -> str: """ Ensures model files are downloaded locally, downloads them if not. Returns path to local files. 
Args: repo_id: The Hugging Face model repo ID (required if local_dir not provided) revision: Optional git revision to use local_dir: Optional local directory path where model files should be stored/found """ if local_dir is not None: if os.path.exists(local_dir): print(f"Using provided local directory: {local_dir}") else: # Create the local directory if it doesn't exist os.makedirs(local_dir, exist_ok=True) print(f"Created local directory: {local_dir}") if repo_id is None: raise ValueError("Either repo_id or local_dir must be provided") print(f"Ensuring {repo_id} (revision: {revision or 'latest'}) is downloaded...") try: # First try to find files locally download_dir = snapshot_download(repo_id, revision=revision, local_files_only=True, local_dir=local_dir) print(f"Found model files locally at {download_dir}") return download_dir except Exception: # If files not found locally, download them print(f"Downloading model files for {repo_id}...") download_dir = snapshot_download(repo_id, revision=revision, local_files_only=False, local_dir=local_dir) print(f"Downloaded model files to {download_dir}") return download_dir def load_model_state_dict(input_path: str) -> dict: """ Load model state dict, handling both single and sharded files. """ index_path = os.path.join(input_path, "pytorch_model.bin.index.json") single_file_path = os.path.join(input_path, "pytorch_model.bin") # Check if we have a sharded model if os.path.exists(index_path): print("Loading sharded model...") state_dict = {} with open(index_path, "r") as f: index = json.load(f) # Get unique shard files and load each one only once unique_shard_files = sorted(set(index["weight_map"].values())) for shard_file in unique_shard_files: print(f"Loading shard {shard_file}...") shard_path = os.path.join(input_path, shard_file) shard_dict = torch.load(shard_path, map_location="cpu") state_dict.update(shard_dict) return state_dict # Single file model elif os.path.exists(single_file_path): print("Loading single file model...") return torch.load(single_file_path, map_location="cpu") else: raise ValueError(f"No model files found in {input_path}") def convert_model( repo_id=None, local_dir=None, text_model_id=None, output_dir=None, output_hub_path=None, safe_serialization=True, revision=None, ): """Convert and save the model weights, processor, and configuration.""" if output_dir is None and output_hub_path is None: raise ValueError("At least one of output_dir or output_hub_path must be specified") if repo_id is None and local_dir is None: raise ValueError("Either repo_id or local_dir must be specified") # Create output directory if specified if output_dir: os.makedirs(output_dir, exist_ok=True) print(f"Created/verified output directory: {output_dir}") torch.set_default_dtype(torch.float16) # Download or locate model files input_path = ensure_model_downloaded(repo_id=repo_id, revision=revision, local_dir=local_dir) # Load configuration files required_files = ["config.json", "preprocessor_config.json", "special_tokens_map.json", "tokenizer_config.json"] missing_files = [f for f in required_files if not os.path.exists(os.path.join(input_path, f))] if missing_files: raise ValueError( f"The following required configuration files are missing from {input_path}: {', '.join(missing_files)}. " "Please ensure you have downloaded all necessary model files." 
) with open(os.path.join(input_path, "config.json"), "r") as f: config_data = json.load(f) with open(os.path.join(input_path, "preprocessor_config.json"), "r") as f: preprocessor_config = json.load(f) with open(os.path.join(input_path, "special_tokens_map.json"), "r") as f: special_tokens_map = json.load(f) with open(os.path.join(input_path, "tokenizer_config.json"), "r") as f: tokenizer_config = json.load(f) # Create tokenizer directly from tokenizer.json if it exists tokenizer_json_path = os.path.join(input_path, "tokenizer.json") special_image_tokens = { "image_token": "<image_placeholder>", "boi_token": "<begin_of_image>", "eoi_token": "<end_of_image>", } if os.path.exists(tokenizer_json_path) and not text_model_id: tokenizer = AutoTokenizer.from_pretrained( input_path, # This will load tokenizer.json directly model_max_length=tokenizer_config["model_max_length"], extra_special_tokens=special_image_tokens, ) else: # Fallback to creating from text_model_id with special tokens tokenizer = AutoTokenizer.from_pretrained( text_model_id, bos_token=special_tokens_map["bos_token"], eos_token=special_tokens_map["eos_token"], pad_token=special_tokens_map["pad_token"], additional_special_tokens=special_tokens_map["additional_special_tokens"], model_max_length=tokenizer_config["model_max_length"], extra_special_tokens=special_image_tokens, ) # Create image processor from config image_processor_kwargs = {} for key in ["do_normalize", "image_mean", "image_std", "min_size", "rescale_factor"]: if key in preprocessor_config: image_processor_kwargs[key] = preprocessor_config[key] if "image_size" in preprocessor_config: image_processor_kwargs["size"] = { "height": preprocessor_config["image_size"], "width": preprocessor_config["image_size"], } image_processor = JanusImageProcessor(**image_processor_kwargs) # Create processor with chat template processor = JanusProcessor( image_processor=image_processor, tokenizer=tokenizer, chat_template=CHAT_TEMPLATE, use_default_system_prompt=True, ) if output_dir: print(f"Saving processor to {output_dir}...") processor.save_pretrained(output_dir) if output_hub_path: print(f"Pushing processor to hub at {output_hub_path}...") processor.push_to_hub(output_hub_path) # Create model configurations text_config_kwargs = {} for key in [ "vocab_size", "hidden_size", "intermediate_size", "num_hidden_layers", "num_attention_heads", "num_key_value_heads", "hidden_act", "max_position_embeddings", "dtype", ]: if key in config_data["language_config"]: text_config_kwargs[key] = config_data["language_config"][key] # Add token IDs from tokenizer text_config_kwargs.update( { "pad_token_id": tokenizer.pad_token_id, "bos_token_id": tokenizer.bos_token_id, "eos_token_id": tokenizer.eos_token_id, } ) text_config = LlamaConfig(**text_config_kwargs) # Create vision config vision_config_kwargs = {} if "image_size" in config_data["vision_config"]["params"]: vision_config_kwargs["image_size"] = config_data["vision_config"]["params"]["image_size"] # Add aligner params if present if "aligner_config" in config_data and "params" in config_data["aligner_config"]: if "n_embed" in config_data["aligner_config"]["params"]: vision_config_kwargs["projection_dim"] = config_data["aligner_config"]["params"]["n_embed"] if "depth" in config_data["aligner_config"]["params"]: vision_config_kwargs["depth"] = config_data["aligner_config"]["params"]["depth"] vision_config = JanusVisionConfig(**vision_config_kwargs) vq_config = JanusVQVAEConfig( embed_dim=config_data["gen_vision_config"]["params"]["n_embed"], 
num_embeddings=config_data["gen_vision_config"]["params"]["image_token_size"], projection_dim=config_data["gen_aligner_config"]["params"]["n_embed"], depth=config_data["gen_aligner_config"]["params"]["depth"], image_token_embed_dim=config_data["gen_head_config"]["params"]["image_token_embed"], ) # Create the main config config = JanusConfig( text_config=text_config, vision_config=vision_config, vq_config=vq_config, image_token_id=tokenizer.vocab.get("<image_placeholder>"), ) # Save the config if output_dir: config.save_pretrained(output_dir) if output_hub_path: config.push_to_hub(output_hub_path) # Initialize model with empty weights print("Creating empty model...") with init_empty_weights(): model = JanusForConditionalGeneration(config) model.generation_config._from_model_config = False model.generation_config.temperature = 1 model.generation_config.guidance_scale = 5 model.generation_config.pad_token_id = tokenizer.vocab.get("<\uff5c\u2581pad\u2581\uff5c>") if not hasattr(model.generation_config, "generation_kwargs"): model.generation_config.generation_kwargs = {} model.generation_config.generation_kwargs["boi_token_id"] = tokenizer.vocab.get("<begin_of_image>") # Load and convert state dict print("Loading state dict...") state_dict = load_model_state_dict(input_path) state_dict = convert_state_dict_to_hf(state_dict) # Load converted state dict print("Loading converted weights into model...") model.load_state_dict(state_dict, strict=True, assign=True) # Tie weights before any device mapping print("Tying weights...") model.tie_weights() # Save the model if output_dir: print(f"Saving model to {output_dir}...") model.save_pretrained(output_dir, safe_serialization=safe_serialization) if output_hub_path: print(f"Pushing model to hub at {output_hub_path}...") model.push_to_hub(output_hub_path, safe_serialization=safe_serialization) del state_dict, model gc.collect() # Validate the saved model if saved locally if output_dir: print("Reloading the local model to check if it's saved correctly...") # TODO: warning about weights not being tied is raised here regardless of model.tie_weights() above JanusForConditionalGeneration.from_pretrained(output_dir, device_map="auto") print("Local model reloaded successfully.") def main(): parser = argparse.ArgumentParser() parser.add_argument( "--repo_id", help="HuggingFace Hub repo ID for the model", default=None, ) parser.add_argument( "--local_dir", help="Local directory containing the model files", default=None, ) parser.add_argument( "--revision", help="Specific revision to download from the Hub", default=None, ) parser.add_argument( "--output_dir", help="Location to write HF model locally", default=None, ) parser.add_argument( "--output_hub_path", help="Repository ID to push model to hub (e.g. 'username/model-name')", default=None, ) parser.add_argument( "--text_model_id", help="Hub ID of the text model to get tokenizer from. 
Optional if tokenizer.json exists in the model directory.", required=False, ) parser.add_argument( "--safe_serialization", action="store_true", help="Whether to save using safetensors", ) args = parser.parse_args() if args.output_dir is None and args.output_hub_path is None: raise ValueError("At least one of --output_dir or --output_hub_path must be specified") if args.repo_id is None and args.local_dir is None: raise ValueError("Either --repo_id or --local_dir must be specified") convert_model( repo_id=args.repo_id, local_dir=args.local_dir, text_model_id=args.text_model_id, output_dir=args.output_dir, output_hub_path=args.output_hub_path, safe_serialization=args.safe_serialization, revision=args.revision, ) if __name__ == "__main__": main()
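
# Example invocation (an illustrative sketch, not part of the original script; the repo ID and
# output path below are placeholders, substitute the checkpoint you actually want to convert):
#
#   python convert_janus_weights_to_hf.py \
#       --repo_id deepseek-ai/Janus-Pro-1B \
#       --output_dir ./janus_hf \
#       --safe_serialization
#
# At least one of --output_dir / --output_hub_path and at least one of --repo_id / --local_dir
# must be given, otherwise convert_model() raises a ValueError.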
transformers/src/transformers/models/janus/convert_janus_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/janus/convert_janus_weights_to_hf.py", "repo_id": "transformers", "token_count": 8222 }
505
# coding=utf-8
# Copyright 2024 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from fairseq.checkpoint_utils import load_checkpoint_to_cpu

from transformers import Kosmos2_5Config, Kosmos2_5ForConditionalGeneration


KEYS_TO_MODIFY_MAPPING = {
    "gpt_model.decoder.output_projection": "text_model.lm_head",
    "gpt_model.decoder": "text_model.model",
    "img_connector": "image_to_text_projection",
    "img_model.embeddings": "vision_model.embeddings",
    "img_model.encoder": "vision_model.encoder",
    "img_model.layernorm": "vision_model.layernorm",
    "img_model": "vision_model",
    "ln_pre": "pre_layrnorm",
    "ln_post": "post_layernorm",
    "transformer.resblocks": "encoder.layers",
    "ts_attn": "self_attn",
    "ln_1": "layer_norm1",
    "ln_2": "layer_norm2",
    "c_fc": "fc1",
    "c_proj": "fc2",
}

KEYS_TO_IGNORE = [
    # this buffer in the original code is only used to send weights to the desired device
    "gpt_model.decoder.embed_positions._float_tensor",
    # this weight is never used in the forward pass of the original KOSMOS-2.5
    "gpt_model.decoder.self_attn_sope.scale",
]


def rename_key(key):
    for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
        if key_to_modify in key:
            key = key.replace(key_to_modify, new_key)

    return key


def convert_kosmos2_5_checkpoint_to_pytorch(checkpoint_path, pytorch_dump_folder_path):
    state = load_checkpoint_to_cpu(checkpoint_path)
    state_dict = state["model"]
    state_dict_keys = list(state_dict.keys())

    config = Kosmos2_5Config()
    # This is necessary to match the results given by the original demo
    config.text_config.no_repeat_ngram_size = 3
    model = Kosmos2_5ForConditionalGeneration(config)

    # convert (by renaming keys)
    converted_state_dict = {}
    for key in state_dict_keys:
        if key in KEYS_TO_IGNORE:
            continue
        renamed_key = rename_key(key)
        converted_state_dict[renamed_key] = state_dict[key]

    # check weight loading: loading with strict=True verifies that the converted state dict
    # matches the parameters expected by the model
    model.load_state_dict(converted_state_dict, strict=True)

    # save the result
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--kosmos2_5_checkpoint_path",
        default="ckpt.pt",
        type=str,
        required=False,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="ckpt",
        type=str,
        required=False,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_kosmos2_5_checkpoint_to_pytorch(args.kosmos2_5_checkpoint_path, args.pytorch_dump_folder_path)
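
# Example invocation (an illustrative sketch, not part of the original script; the paths are
# placeholders for a locally available fairseq checkpoint and an output folder):
#
#   python convert_kosmos2_5.py --kosmos2_5_checkpoint_path ckpt.pt --pytorch_dump_folder_path ./kosmos2_5_hf
#
# Both arguments have defaults ("ckpt.pt" and "ckpt"), so the script can also be run without flags
# when the original checkpoint already sits at the default location.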
transformers/src/transformers/models/kosmos2_5/convert_kosmos2_5.py/0
{ "file_path": "transformers/src/transformers/models/kosmos2_5/convert_kosmos2_5.py", "repo_id": "transformers", "token_count": 1329 }
506
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch LayoutLMv3 model.""" import collections import math from typing import Optional, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( auto_docstring, logging, torch_int, ) from .configuration_layoutlmv3 import LayoutLMv3Config logger = logging.get_logger(__name__) class LayoutLMv3PatchEmbeddings(nn.Module): """LayoutLMv3 image (patch) embeddings. This class also automatically interpolates the position embeddings for varying image sizes.""" def __init__(self, config): super().__init__() image_size = ( config.input_size if isinstance(config.input_size, collections.abc.Iterable) else (config.input_size, config.input_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) self.patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values, position_embedding=None): embeddings = self.proj(pixel_values) if position_embedding is not None: # interpolate the position embedding to the corresponding size position_embedding = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1) position_embedding = position_embedding.permute(0, 3, 1, 2) patch_height, patch_width = embeddings.shape[2], embeddings.shape[3] position_embedding = F.interpolate(position_embedding, size=(patch_height, patch_width), mode="bicubic") embeddings = embeddings + position_embedding embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings class LayoutLMv3TextEmbeddings(nn.Module): """ LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings. 
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size) def calculate_spatial_position_embeddings(self, bbox): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023)) w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023)) # below is the difference between LayoutLMEmbeddingsV2 (torch.cat) and LayoutLMEmbeddingsV1 (add) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) return spatial_position_embeddings def create_position_ids_from_input_ids(self, input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask return incremental_indices.long() + padding_idx def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) def forward( self, input_ids=None, bbox=None, token_type_ids=None, position_ids=None, inputs_embeds=None, ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to( input_ids.device ) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox) embeddings = embeddings + spatial_position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings @auto_docstring class LayoutLMv3PreTrainedModel(PreTrainedModel): config: LayoutLMv3Config base_model_prefix = "layoutlmv3" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, LayoutLMv3Model): if self.config.visual_embed: module.cls_token.data.zero_() module.pos_embed.data.zero_() class LayoutLMv3SelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias def cogview_attention(self, attention_scores, alpha=32): """ https://huggingface.co/papers/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better. 
""" scaled_attention_scores = attention_scores / alpha max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1) new_attention_scores = (scaled_attention_scores - max_value) * alpha return nn.Softmax(dim=-1)(new_attention_scores) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow. # Changing the computational order into QT(K/√d) alleviates the problem. (https://huggingface.co/papers/2105.13290) attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2)) if self.has_relative_attention_bias and self.has_spatial_attention_bias: attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size) elif self.has_relative_attention_bias: attention_scores += rel_pos / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. # Use the trick of the CogView paper to stabilize training attention_probs = self.cogview_attention(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput class LayoutLMv3SelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Attention with LayoutLMv2->LayoutLMv3 class LayoutLMv3Attention(nn.Module): def __init__(self, config): super().__init__() self.self = LayoutLMv3SelfAttention(config) self.output = LayoutLMv3SelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Layer with LayoutLMv2->LayoutLMv3 class LayoutLMv3Layer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LayoutLMv3Attention(config) self.intermediate = LayoutLMv3Intermediate(config) self.output = LayoutLMv3Output(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, rel_pos=None, rel_2d_pos=None, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class LayoutLMv3Encoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.has_relative_attention_bias = config.has_relative_attention_bias self.has_spatial_attention_bias = config.has_spatial_attention_bias if self.has_relative_attention_bias: self.rel_pos_bins = config.rel_pos_bins self.max_rel_pos = config.max_rel_pos 
self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False) if self.has_spatial_attention_bias: self.max_rel_2d_pos = config.max_rel_2d_pos self.rel_2d_pos_bins = config.rel_2d_pos_bins self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False) self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False) def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128): ret = 0 if bidirectional: num_buckets //= 2 ret += (relative_position > 0).long() * num_buckets n = torch.abs(relative_position) else: n = torch.max(-relative_position, torch.zeros_like(relative_position)) # now n is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = n < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret def _cal_1d_pos_emb(self, position_ids): rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1) rel_pos = self.relative_position_bucket( rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos, ) # Since this is a simple indexing operation that is independent of the input, # no need to track gradients for this operation # # Without this no_grad context, training speed slows down significantly with torch.no_grad(): rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2) rel_pos = rel_pos.contiguous() return rel_pos def _cal_2d_pos_emb(self, bbox): position_coord_x = bbox[:, :, 0] position_coord_y = bbox[:, :, 3] rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1) rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1) rel_pos_x = self.relative_position_bucket( rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) rel_pos_y = self.relative_position_bucket( rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos, ) # Since this is a simple indexing operation that is independent of the input, # no need to track gradients for this operation # # Without this no_grad context, training speed slows down significantly with torch.no_grad(): rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2) rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2) rel_pos_x = rel_pos_x.contiguous() rel_pos_y = rel_pos_y.contiguous() rel_2d_pos = rel_pos_x + rel_pos_y return rel_2d_pos def forward( self, hidden_states, bbox=None, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, position_ids=None, patch_height=None, patch_width=None, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = 
layer_module( hidden_states, attention_mask, layer_head_mask, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate class LayoutLMv3Intermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_roberta.RobertaOutput class LayoutLMv3Output(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states @auto_docstring class LayoutLMv3Model(LayoutLMv3PreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config if config.text_embed: self.embeddings = LayoutLMv3TextEmbeddings(config) if config.visual_embed: # use the default pre-training parameters for fine-tuning (e.g., input_size) # when the input_size is larger in fine-tuning, we will interpolate the position embeddings in forward self.patch_embed = LayoutLMv3PatchEmbeddings(config) size = int(config.input_size / config.patch_size) self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size)) self.pos_drop = nn.Dropout(p=0.0) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: self.init_visual_bbox(image_size=(size, size)) self.norm = nn.LayerNorm(config.hidden_size, eps=1e-6) self.encoder = LayoutLMv3Encoder(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def init_visual_bbox(self, image_size=(14, 14), max_len=1000): """ Create the bounding boxes for the visual (patch) tokens. 
""" visual_bbox_x = torch.div( torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode="trunc" ) visual_bbox_y = torch.div( torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode="trunc" ) visual_bbox = torch.stack( [ visual_bbox_x[:-1].repeat(image_size[0], 1), visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1), visual_bbox_x[1:].repeat(image_size[0], 1), visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1), ], dim=-1, ).view(-1, 4) cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]]) self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0) def calculate_visual_bbox(self, device, dtype, batch_size): visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1) visual_bbox = visual_bbox.to(device).type(dtype) return visual_bbox def forward_image(self, pixel_values): embeddings = self.patch_embed(pixel_values) # add [CLS] token batch_size, seq_len, _ = embeddings.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings), dim=1) # add position embeddings if self.pos_embed is not None: embeddings = embeddings + self.pos_embed embeddings = self.pos_drop(embeddings) embeddings = self.norm(embeddings) return embeddings @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`): Indices of input sequence tokens in the vocabulary. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) bbox (`torch.LongTensor` of shape `(batch_size, token_sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. token_type_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. 
[What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, token_sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, token_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. Examples: ```python >>> from transformers import AutoProcessor, AutoModel >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") >>> outputs = model(**encoding) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape device = inputs_embeds.device elif pixel_values is not None: batch_size = len(pixel_values) device = pixel_values.device else: raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values") if input_ids is not None or inputs_embeds is not None: if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) embedding_output = self.embeddings( input_ids=input_ids, bbox=bbox, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) final_bbox = final_position_ids = None patch_height = patch_width = None if pixel_values is not None: patch_height, patch_width = ( torch_int(pixel_values.shape[2] / self.config.patch_size), torch_int(pixel_values.shape[3] / self.config.patch_size), ) visual_embeddings = self.forward_image(pixel_values) visual_attention_mask = torch.ones( (batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device ) if attention_mask is not None: attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1) else: attention_mask = visual_attention_mask if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_spatial_attention_bias: visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size) if bbox 
is not None: final_bbox = torch.cat([bbox, visual_bbox], dim=1) else: final_bbox = visual_bbox visual_position_ids = torch.arange( 0, visual_embeddings.shape[1], dtype=torch.long, device=device ).repeat(batch_size, 1) if input_ids is not None or inputs_embeds is not None: position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0) position_ids = position_ids.expand(input_shape) final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1) else: final_position_ids = visual_position_ids if input_ids is not None or inputs_embeds is not None: embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1) else: embedding_output = visual_embeddings embedding_output = self.LayerNorm(embedding_output) embedding_output = self.dropout(embedding_output) elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: if self.config.has_spatial_attention_bias: final_bbox = bbox if self.config.has_relative_attention_bias: position_ids = self.embeddings.position_ids[:, : input_shape[1]] position_ids = position_ids.expand_as(input_ids) final_position_ids = position_ids extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, None, device, dtype=embedding_output.dtype ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, bbox=final_bbox, position_ids=final_position_ids, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, patch_height=patch_height, patch_width=patch_width, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class LayoutLMv3ClassificationHead(nn.Module): """ Head for sentence-level classification tasks. Reference: RobertaClassificationHead """ def __init__(self, config, pool_feature=False): super().__init__() self.pool_feature = pool_feature if pool_feature: self.dense = nn.Linear(config.hidden_size * 3, config.hidden_size) else: self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, x): x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x @auto_docstring( custom_intro=""" LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g. for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda). 
""" ) class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv3 = LayoutLMv3Model(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) if config.num_labels < 10: self.classifier = nn.Linear(config.hidden_size, config.num_labels) else: self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, bbox: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, pixel_values: Optional[torch.LongTensor] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. Examples: ```python >>> from transformers import AutoProcessor, AutoModelForTokenClassification >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7) >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> word_labels = example["ner_tags"] >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt") >>> outputs = model(**encoding) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, pixel_values=pixel_values, ) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # only take the text part of the output representations sequence_output = outputs[0][:, :seq_length] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) 
@auto_docstring class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.layoutlmv3 = LayoutLMv3Model(config) self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, bbox: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.LongTensor] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Examples: ```python >>> from transformers import AutoProcessor, AutoModelForQuestionAnswering >>> from datasets import load_dataset >>> import torch >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> question = "what's his name?" 
>>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt") >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for document image classification tasks such as the [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset. """ ) class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.layoutlmv3 = LayoutLMv3Model(config) self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False) self.init_weights() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, bbox: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.LongTensor] = None, ) -> Union[tuple, SequenceClassifierOutput]: r""" bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. 
Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Examples: ```python >>> from transformers import AutoProcessor, AutoModelForSequenceClassification >>> from datasets import load_dataset >>> import torch >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") >>> sequence_label = torch.tensor([1]) >>> outputs = model(**encoding, labels=sequence_label) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.layoutlmv3( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, bbox=bbox, pixel_values=pixel_values, ) sequence_output = outputs[0][:, 0, :] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "LayoutLMv3ForQuestionAnswering", "LayoutLMv3ForSequenceClassification", "LayoutLMv3ForTokenClassification", "LayoutLMv3Model", "LayoutLMv3PreTrainedModel", ]
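
# Worked illustration of LayoutLMv3Encoder.relative_position_bucket (added for clarity; the numbers
# below are derived from the formula in that method with num_buckets=32 and max_distance=128, and are
# not part of the original file):
#   relative_position = -3   -> n = 3 < max_exact (8), no positive-direction offset        -> bucket 3
#   relative_position = +3   -> n = 3 < 8, plus the +16 offset for positive positions      -> bucket 19
#   relative_position = +50  -> 16 + (8 + floor(ln(50/8) / ln(128/8) * 8)) = 16 + 13       -> bucket 29
#   relative_position = -200 -> 8 + floor(ln(200/8) / ln(128/8) * 8) = 17, clamped to 15   -> bucket 15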
transformers/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py/0
{ "file_path": "transformers/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py", "repo_id": "transformers", "token_count": 23698 }
507
# coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LeViT model configuration"""

from collections import OrderedDict
from collections.abc import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class LevitConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the LeViT
    [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size of the input image.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        kernel_size (`int`, *optional*, defaults to 3):
            The kernel size for the initial convolution layers of patch embedding.
        stride (`int`, *optional*, defaults to 2):
            The stride size for the initial convolution layers of patch embedding.
        padding (`int`, *optional*, defaults to 1):
            The padding size for the initial convolution layers of patch embedding.
        patch_size (`int`, *optional*, defaults to 16):
            The patch size for embeddings.
        hidden_sizes (`list[int]`, *optional*, defaults to `[128, 256, 384]`):
            Dimension of each of the encoder blocks.
        num_attention_heads (`list[int]`, *optional*, defaults to `[4, 8, 12]`):
            Number of attention heads for each attention layer in each block of the Transformer encoder.
        depths (`list[int]`, *optional*, defaults to `[4, 4, 4]`):
            The number of layers in each encoder block.
        key_dim (`list[int]`, *optional*, defaults to `[16, 16, 16]`):
            The size of the key in each of the encoder blocks.
        drop_path_rate (`float`, *optional*, defaults to 0):
            The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
        mlp_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
            encoder blocks.
        attention_ratio (`list[int]`, *optional*, defaults to `[2, 2, 2]`):
            Ratio of the size of the output dimension compared to input dimension of attention layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

Example: ```python >>> from transformers import LevitConfig, LevitModel >>> # Initializing a LeViT levit-128S style configuration >>> configuration = LevitConfig() >>> # Initializing a model (with random weights) from the levit-128S style configuration >>> model = LevitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "levit" def __init__( self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.hidden_sizes = hidden_sizes self.num_attention_heads = num_attention_heads self.depths = depths self.key_dim = key_dim self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.attention_ratio = attention_ratio self.mlp_ratio = mlp_ratio self.initializer_range = initializer_range self.down_ops = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] # Copied from transformers.models.vit.configuration_vit.ViTOnnxConfig class LevitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 __all__ = ["LevitConfig", "LevitOnnxConfig"]
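
# Worked example of the `down_ops` construction above using the default arguments (added for
# clarity; the values are derived directly from the defaults and are not part of the original file):
#   key_dim[0] = 16, hidden_sizes = [128, 256, 384]
#   hidden_sizes[0] // key_dim[0] = 128 // 16 = 8
#   hidden_sizes[1] // key_dim[0] = 256 // 16 = 16
#   -> down_ops = [["Subsample", 16, 8, 4, 2, 2], ["Subsample", 16, 16, 4, 2, 2]]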
transformers/src/transformers/models/levit/configuration_levit.py/0
{ "file_path": "transformers/src/transformers/models/levit/configuration_levit.py", "repo_id": "transformers", "token_count": 2201 }
508
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert LLaVa-NeXT-Video checkpoints from the original repository. URL: https://github.com/LLaVA-VL/LLaVA-NeXT/tree/inference """ import argparse import glob import json from pathlib import Path import torch from accelerate import init_empty_weights from huggingface_hub import hf_hub_download, snapshot_download from safetensors import safe_open from transformers import ( AddedToken, AutoConfig, AutoTokenizer, LlavaNextImageProcessor, LlavaNextVideoConfig, LlavaNextVideoForConditionalGeneration, LlavaNextVideoImageProcessor, LlavaNextVideoProcessor, ) KEYS_TO_MODIFY_MAPPING = { "model.vision_tower.": "", ".vision_resampler": "", # all lmms-lab models do avg pooling, so no vision_resampler "model.mm_projector": "multi_modal_projector", "model": "model.model", "vision_model.model": "vision_model", "lm_head": "language_model.lm_head", "model.model": "language_model.model", "multi_modal_projector.0": "multi_modal_projector.linear_1", "multi_modal_projector.2": "multi_modal_projector.linear_2", "language_model.model.image_newline": "image_newline", } # {{SYSTEM_PROMPT}} USER: <image>\n{{PROMPT}} ASSISTANT:" assistant end with "</s> " chat_vicuna = ( "{% for message in messages %}" "{% if message['role'] == 'system' %}" "{{ message['content'][0]['text'] }}" "{% else %}" "{{ message['role'].upper() + ': '}}" "{% endif %}" "{# Render all images first #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}" "{{ '<image>\n' }}" "{% endfor %}" "{# Render all text next #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}" "{{ content['text'] + ' '}}" "{% endfor %}" "{% endfor %}" "{% if add_generation_prompt %}" "{{ 'ASSISTANT:' }}" "{% endif %}" ) # "[INST] <image>\nWhat is shown in this image? 
[/INST]" assistant end with "</s> " chat_mistral = ( "{% for message in messages %}" "{% if message['role'] == 'user' %}" "{{ '[INST] ' }}" "{# Render all images first #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}" "{{ '<image>\n' }}" "{% endfor %}" "{# Render all text next #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}" "{{ content['text'] }}" "{% endfor %}" "{{' [/INST]' }}" "{% elif message['role'] == 'assistant' %}" r"{{ ' ' + message['content'][0]['text'] + '<\s> '}}" "{% else %}" "{{ raise_exception('Only user and assistant roles are supported!') }}" "{% endif %}" "{% endfor %}" ) # "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n" chat_yi = ( "{% for message in messages %}" "{{'<|im_start|>' + message['role'] + '\n'}}" "{# Render all images first #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}" "{{ '<image>\n' }}" "{% endfor %}" "{# Render all text next #}" "{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}" "{{ content['text'] }}" "{% endfor %}" "{{'<|im_end|>' + '\n'}}" "{% endfor %}" "{% if add_generation_prompt %}" "{{ '<|im_start|>assistant\n' }}" "{% endif %}" ) model2template = { "lmms-lab/LLaVA-NeXT-Video-7B-32K": chat_mistral, "lmms-lab/LLaVA-NeXT-Video-7B": chat_vicuna, "lmms-lab/LLaVA-NeXT-Video-7B-DPO": chat_vicuna, "lmms-lab/LLaVA-NeXT-Video-34B": chat_yi, "lmms-lab/LLaVA-NeXT-Video-34B-DPO": chat_yi, } def load_original_state_dict(model_id): directory_path = snapshot_download(repo_id=model_id, allow_patterns=["*.safetensors"]) original_state_dict = {} for path in glob.glob(f"{directory_path}/*"): if path.endswith(".safetensors"): with safe_open(path, framework="pt", device="cpu") as f: for key in f.keys(): original_state_dict[key] = f.get_tensor(key) return original_state_dict def convert_state_dict_to_hf(state_dict): new_state_dict = {} for key, value in state_dict.items(): if key.endswith(".inv_freq"): continue for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: key = key.replace(key_to_modify, new_key) new_state_dict[key] = value.to(torch.bfloat16) return new_state_dict def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False): # load original config filepath = hf_hub_download(repo_id=model_id, filename="config.json", repo_type="model") with open(filepath) as f: data = json.load(f) print(data) if model_id == "lmms-lab/LLaVA-NeXT-Video-7B-32K": text_model_id = "mistralai/Mistral-7B-Instruct-v0.2" video_token_id = 32000 image_token_id = 32001 overwrite_text_config = {} elif model_id in ["lmms-lab/LLaVA-NeXT-Video-7B", "lmms-lab/LLaVA-NeXT-Video-7B-DPO"]: text_model_id = "lmsys/vicuna-7b-v1.5" video_token_id = 32000 image_token_id = 32001 overwrite_text_config = {"factor": 2.0, "type": "linear"} elif model_id in ["lmms-lab/LLaVA-NeXT-Video-34B", "lmms-lab/LLaVA-NeXT-Video-34B-DPO"]: text_model_id = "NousResearch/Nous-Hermes-2-Yi-34B" video_token_id = 64000 image_token_id = 64001 overwrite_text_config = {} else: raise ValueError("Incorrect checkpoint referenced. 
Text model-id not identified!") vision_model_id = data["mm_vision_tower"] torch.set_default_dtype(torch.bfloat16) text_config = AutoConfig.from_pretrained(text_model_id) text_config = text_config.to_dict() text_config.update(overwrite_text_config) tokenizer = AutoTokenizer.from_pretrained(text_model_id, use_fast=True, padding_side="left") tokenizer.add_tokens(AddedToken("<video>", special=True, normalized=False), special_tokens=True) tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True) image_processor = LlavaNextImageProcessor.from_pretrained(vision_model_id) video_processor = LlavaNextVideoImageProcessor.from_pretrained(vision_model_id) processor = LlavaNextVideoProcessor( tokenizer=tokenizer, video_processor=video_processor, image_processor=image_processor, chat_template=model2template[model_id], ) config = LlavaNextVideoConfig( text_config=text_config, image_grid_pinpoints=image_processor.image_grid_pinpoints, use_image_newline_parameter=True, video_token_id=video_token_id, image_token_id=image_token_id, ) with init_empty_weights(): model = LlavaNextVideoForConditionalGeneration(config) # load original state dict state_dict = load_original_state_dict(model_id) state_dict = convert_state_dict_to_hf(state_dict) model.load_state_dict(state_dict, assign=True, strict=True) # See https://nlp.stanford.edu/~johnhew/vocab-expansion.html for why we get mean/stdev this way to expand embeddings pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data mu = torch.mean(pre_expansion_embeddings, dim=0).float() n = pre_expansion_embeddings.size()[0] sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma) # We add an image token so we resize the model # Pad to 64 for performance reasons pad_shape = 64 vocab_size = config.text_config.vocab_size # this one has 2 additional tokens, namely <image>, <video> and <pad> num_tokens = vocab_size + 3 model.resize_token_embeddings(num_tokens, pad_to_multiple_of=pad_shape) model.language_model.model.embed_tokens.weight.data[vocab_size:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[vocab_size:].shape[0])), dim=0, ) model.language_model.lm_head.weight.data[vocab_size:] = torch.stack( tuple(dist.sample() for _ in range(model.language_model.lm_head.weight.data[vocab_size:].shape[0])), dim=0, ) if pytorch_dump_folder_path is not None: print(f"Saving model and processor for {model_id} to {pytorch_dump_folder_path}") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: repo_id = model_id.split("/")[-1] print(f"Pushing model to hub repo: {repo_id}") model.push_to_hub(f"llava-hf/{repo_id}-hf") processor.push_to_hub(f"llava-hf/{repo_id}-hf") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_id", help="Hub location of the model to convert", default="lmms-lab/LLaVA-NeXT-Video-7B", choices=[ "lmms-lab/LLaVA-NeXT-Video-7B", "lmms-lab/LLaVA-NeXT-Video-7B-DPO", "lmms-lab/LLaVA-NeXT-Video-7B-32K", "lmms-lab/LLaVA-NeXT-Video-34B", "lmms-lab/LLaVA-NeXT-Video-34B-DPO", ], required=False, ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." 
) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_llava_to_hf(args.model_id, args.pytorch_dump_folder_path, args.push_to_hub)
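# Illustrative usage sketch (hedged): how this conversion script is typically invoked, and how
# the folder it writes can be loaded afterwards. The output directory name is only an example,
# and the loading code assumes the conversion has already been run (it downloads the original
# checkpoint from the Hub, which is large).
#
#   python convert_llava_next_video_weights_to_hf.py \
#       --model_id lmms-lab/LLaVA-NeXT-Video-7B \
#       --pytorch_dump_folder_path ./llava-next-video-7b-hf
#
import torch

from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor

dump_folder = "./llava-next-video-7b-hf"  # example path matching the command above
processor = LlavaNextVideoProcessor.from_pretrained(dump_folder)
model = LlavaNextVideoForConditionalGeneration.from_pretrained(dump_folder, torch_dtype=torch.bfloat16)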
transformers/src/transformers/models/llava_next_video/convert_llava_next_video_weights_to_hf.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for MarkupLM. """ from typing import Optional, Union from ...file_utils import TensorType from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy class MarkupLMProcessor(ProcessorMixin): r""" Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single processor. [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model. It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings. Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. Args: feature_extractor (`MarkupLMFeatureExtractor`): An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input. tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`): An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input. parse_html (`bool`, *optional*, defaults to `True`): Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths. """ feature_extractor_class = "MarkupLMFeatureExtractor" tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast") parse_html = True def __call__( self, html_strings=None, nodes=None, xpaths=None, node_labels=None, questions=None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and returns the output. Optionally, one can also provide a `text` argument which is passed along as first sequence. Please refer to the docstring of the above two methods for more information. 
""" # first, create nodes and xpaths if self.parse_html: if html_strings is None: raise ValueError("Make sure to pass HTML strings in case `parse_html` is set to `True`") if nodes is not None or xpaths is not None or node_labels is not None: raise ValueError( "Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`" ) features = self.feature_extractor(html_strings) nodes = features["nodes"] xpaths = features["xpaths"] else: if html_strings is not None: raise ValueError("You have passed HTML strings but `parse_html` is set to `False`.") if nodes is None or xpaths is None: raise ValueError("Make sure to pass nodes and xpaths in case `parse_html` is set to `False`") # # second, apply the tokenizer if questions is not None and self.parse_html: if isinstance(questions, str): questions = [questions] # add batch dimension (as the feature extractor always adds a batch dimension) encoded_inputs = self.tokenizer( text=questions if questions is not None else nodes, text_pair=nodes if questions is not None else None, xpaths=xpaths, node_labels=node_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) return encoded_inputs __all__ = ["MarkupLMProcessor"]
transformers/src/transformers/models/markuplm/processing_markuplm.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for MGT-STR CHAR.""" import json import os from typing import Optional from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"} class MgpstrTokenizer(PreTrainedTokenizer): """ Construct a MGP-STR char tokenizer. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. unk_token (`str`, *optional*, defaults to `"[GO]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"[GO]"`): The beginning of sequence token. eos_token (`str`, *optional*, defaults to `"[s]"`): The end of sequence token. pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`): A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by attention mechanisms or loss computation. """ vocab_files_names = VOCAB_FILES_NAMES def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs): with open(vocab_file, encoding="utf-8") as vocab_handle: self.vocab = json.load(vocab_handle) self.decoder = {v: k for k, v in self.vocab.items()} super().__init__( unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, ) @property def vocab_size(self): return len(self.vocab) def get_vocab(self): vocab = dict(self.vocab).copy() vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text): """Tokenize a string.""" char_tokens = [] for s in text: char_tokens.extend(s) return char_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.vocab.get(token, self.vocab.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n") return (vocab_file,) __all__ = ["MgpstrTokenizer"]
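# Illustrative usage sketch (hedged, self-contained): the toy vocabulary below is made up purely
# to show the character-level behaviour implemented above, including the fallback of
# out-of-vocabulary characters to the "[GO]" unknown token.
import json
import tempfile

toy_vocab = {"[GO]": 0, "[s]": 1, "h": 2, "e": 3, "l": 4, "o": 5}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(toy_vocab, f)
    vocab_path = f.name

tokenizer = MgpstrTokenizer(vocab_file=vocab_path)
print(tokenizer.tokenize("hello!"))                     # ['h', 'e', 'l', 'l', 'o', '!']
print(tokenizer.convert_tokens_to_ids(list("hello!")))  # [2, 3, 4, 4, 5, 0] -- '!' falls back to [GO]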
transformers/src/transformers/models/mgp_str/tokenization_mgp_str.py
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mllama model configuration""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...modeling_rope_utils import rope_config_validation from ...utils import logging logger = logging.get_logger(__name__) class MllamaVisionConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MllamaVisionModel`]. It is used to instantiate an Mllama vision model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mllama-11B. e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1280): Dimensionality of the encoder layers and the pooler layer. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_global_layers (`int`, *optional*, defaults to 8): Number of global layers in the Transformer encoder. Vision model has a second transformer encoder, called global. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. intermediate_size (`int`, *optional*, defaults to 5120): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. vision_output_dim (`int`, *optional*, defaults to 7680): Dimensionality of the vision model output. Includes output of transformer encoder with intermediate layers and global transformer encoder. image_size (`int`, *optional*, defaults to 448): The size (resolution) of each image *tile*. patch_size (`int`, *optional*, defaults to 14): The size (resolution) of each patch. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. max_num_tiles (`int`, *optional*, defaults to 4): Maximum number of tiles for image splitting. intermediate_layers_indices (`list[int]`, *optional*, defaults to [3, 7, 15, 23, 30]): Indices of intermediate layers of transformer encoder from which to extract and output features. These output features are concatenated with final hidden state of transformer encoder. supported_aspect_ratios (`list[list[int]]`, *optional*): List of supported aspect ratios for image splitting. 
If not specified, the default supported aspect ratios are [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] for `max_num_tiles=4`. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers import MllamaVisionConfig, MllamaVisionModel >>> # Initializing a Llama config >>> config = MllamaVisionConfig() >>> # Initializing a vision model from the mllama-11b style configuration >>> model = MllamaVisionModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mllama_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size: int = 1280, hidden_act: str = "gelu", num_hidden_layers: int = 32, num_global_layers: int = 8, num_attention_heads: int = 16, num_channels: int = 3, intermediate_size: int = 5120, vision_output_dim: int = 7680, image_size: int = 448, patch_size: int = 14, norm_eps: float = 1e-5, max_num_tiles: int = 4, intermediate_layers_indices: Optional[list[int]] = None, supported_aspect_ratios: Optional[list[list[int]]] = None, initializer_range: float = 0.02, **kwargs, ): if supported_aspect_ratios is None: if max_num_tiles != 4: raise ValueError("max_num_tiles must be 4 for default supported aspect ratios") supported_aspect_ratios = [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] if intermediate_layers_indices is None: intermediate_layers_indices = [3, 7, 15, 23, 30] self.hidden_size = hidden_size self.hidden_act = hidden_act self.num_hidden_layers = num_hidden_layers self.num_channels = num_channels self.intermediate_size = intermediate_size self.image_size = image_size self.vision_output_dim = vision_output_dim self.patch_size = patch_size self.intermediate_layers_indices = intermediate_layers_indices self.num_global_layers = num_global_layers self.max_num_tiles = max_num_tiles self.norm_eps = norm_eps self.attention_heads = num_attention_heads self.supported_aspect_ratios = supported_aspect_ratios self.initializer_range = initializer_range super().__init__(**kwargs) @property def max_aspect_ratio_id(self) -> int: return len(self.supported_aspect_ratios) class MllamaTextConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MllamaTextModel`]. It is used to instantiate an Mllama text model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mllama-11B. e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 128256): Vocabulary size of the Mllama text model. Defines the maximum number of different tokens that can be represented by the `inputs_ids` passed when calling [`MllamaTextModel`]. hidden_size (`int`, *optional*, defaults to 4096): Dimensionality of the embeddings and hidden states. hidden_act (`str` or `Callable`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the encoder and pooler. num_hidden_layers (`int`, *optional*, defaults to 40): Number of hidden layers in the Transformer encoder. 
num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If not specified, will default to `num_attention_heads`. intermediate_size (`int`, *optional*, defaults to 14336): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. rope_theta (`float`, *optional*, defaults to `500000.0`): The base period of the RoPE embeddings. rope_scaling (`Dict`, *optional*): Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value accordingly. Expected contents: `rope_type` (`str`): The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope', 'llama3'], with 'default' being the original RoPE implementation. `factor` (`float`, *optional*): Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In most scaling types, a `factor` of x will enable the model to handle sequences of length x * original maximum pre-trained length. `original_max_position_embeddings` (`int`, *optional*): Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during pretraining. `attention_factor` (`float`, *optional*): Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation. If unspecified, it defaults to value recommended by the implementation, using the `factor` field to infer the suggested value. `beta_fast` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear ramp function. If unspecified, it defaults to 32. `beta_slow` (`float`, *optional*): Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear ramp function. If unspecified, it defaults to 1. `short_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to short contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `long_factor` (`list[float]`, *optional*): Only used with 'longrope'. The scaling factor to be applied to long contexts (< `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2 `low_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE `high_freq_factor` (`float`, *optional*): Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. max_position_embeddings (`int`, *optional*, defaults to 131072): The maximum sequence length that this model might ever be used with. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions. 
tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether to tie weight embeddings cross_attention_layers (`list[int]`, *optional*): Indices of the cross attention layers. If not specified, will default to [3, 8, 13, 18, 23, 28, 33, 38]. dropout (`float`, *optional*, defaults to 0): The dropout probability for self- and cross-attention layers. bos_token_id (`int`, *optional*, defaults to 128000): The id of the beginning of sentence token. eos_token_id (`int`, *optional*, defaults to 128001): The id of the end of sentence token. pad_token_id (`int`, *optional*, defaults to 128004): The id of the padding token. Example: ```python >>> from transformers import MllamaTextModel, MllamaTextConfig >>> # Initializing a Mllama text config >>> config = MllamaTextConfig() >>> # Initializing a model from the Mllama text configuration >>> model = MllamaTextModel(config) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mllama_text_model" base_config_key = "text_config" def __init__( self, vocab_size: int = 128256, hidden_size: int = 4096, hidden_act: str = "silu", num_hidden_layers: int = 40, num_attention_heads: int = 32, num_key_value_heads: int = 8, intermediate_size: int = 14_336, rope_theta: float = 500_000, rope_scaling: Optional[dict] = None, rms_norm_eps: float = 1e-5, max_position_embeddings: int = 131_072, initializer_range: float = 0.02, use_cache: bool = True, tie_word_embeddings: bool = False, cross_attention_layers: Optional[list[int]] = None, dropout: float = 0, bos_token_id: int = 128000, eos_token_id: int = 128001, pad_token_id: Optional[int] = 128004, **kwargs, ): if cross_attention_layers is None: cross_attention_layers = [3, 8, 13, 18, 23, 28, 33, 38] self.vocab_size = vocab_size self.num_hidden_layers = num_hidden_layers self.cross_attention_layers = cross_attention_layers self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.initializer_range = initializer_range self.use_cache = use_cache self.rope_theta = rope_theta self.rms_norm_eps = rms_norm_eps self.intermediate_size = intermediate_size self.dropout = dropout self.hidden_act = hidden_act self.rope_scaling = rope_scaling self.max_position_embeddings = max_position_embeddings rope_config_validation(self) super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) class MllamaConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MllamaForConditionalGeneration`]. It is used to instantiate an Mllama model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Mllama-9B. e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaVisionConfig`): The config object or dictionary of the vision backbone. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaTextConfig`): The config object or dictionary of the text backbone. image_token_index (`int`, *optional*, defaults to 128256): The image token index to encode the image prompt. 
Example: ```python >>> from transformers import MllamaForConditionalGeneration, MllamaConfig, MllamaVisionConfig, MllamaTextConfig >>> # Initializing a CLIP-vision config >>> vision_config = MllamaVisionConfig() >>> # Initializing a Llama config >>> text_config = MllamaTextConfig() >>> # Initializing a mllama-11b style configuration >>> configuration = MllamaConfig(vision_config, text_config) >>> # Initializing a model from the mllama-11b style configuration >>> model = MllamaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mllama" attribute_map = { "image_token_id": "image_token_index", } sub_configs = {"text_config": MllamaTextConfig, "vision_config": MllamaVisionConfig} def __init__( self, vision_config=None, text_config=None, image_token_index=128256, **kwargs, ): if vision_config is None: self.vision_config = MllamaVisionConfig() logger.info("vision_config is None, using default mllama vision config") elif isinstance(vision_config, dict): self.vision_config = MllamaVisionConfig(**vision_config) elif isinstance(vision_config, MllamaVisionConfig): self.vision_config = vision_config self.image_token_index = image_token_index if text_config is None: self.text_config = MllamaTextConfig() logger.info("text_config is None, using default mllama text config") elif isinstance(text_config, dict): self.text_config = MllamaTextConfig(**text_config) elif isinstance(text_config, MllamaTextConfig): self.text_config = text_config super().__init__(**kwargs) __all__ = ["MllamaConfig"]
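# Illustrative usage sketch (hedged) for composing the configuration classes defined above.
# The small text-config overrides are arbitrary and only demonstrate that sub-configs can be
# passed either as config objects or as plain dicts; everything else uses the documented defaults.
vision_config = MllamaVisionConfig()  # default tiles, aspect ratios and intermediate layer indices
text_config = {"num_hidden_layers": 4, "hidden_size": 512, "intermediate_size": 1024}  # dict form

config = MllamaConfig(vision_config=vision_config, text_config=text_config)
print(config.vision_config.max_aspect_ratio_id)   # 8 supported aspect ratios by default
print(config.text_config.cross_attention_layers)  # [3, 8, 13, 18, 23, 28, 33, 38] by default
print(config.image_token_index)                   # 128256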
transformers/src/transformers/models/mllama/configuration_mllama.py
# MIT License # # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math import os import warnings from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.replace("ffn_layer", "ffn") name = name.replace("FakeLayerNorm", "LayerNorm") name = name.replace("extra_output_weights", "dense/kernel") name = name.replace("bert", "mobilebert") name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape, ( f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model class NoNorm(nn.Module): def __init__(self, feat_size, eps=None): super().__init__() self.bias = nn.Parameter(torch.zeros(feat_size)) self.weight = nn.Parameter(torch.ones(feat_size)) def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: return input_tensor * self.weight + self.bias NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm} class MobileBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.trigram_input = config.trigram_input self.embedding_size = config.embedding_size self.hidden_size = config.hidden_size self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) embed_dim_multiplier = 3 if self.trigram_input else 1 embedded_input_size = self.embedding_size * embed_dim_multiplier self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def 
forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://huggingface.co/papers/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. inputs_embeds = torch.cat( [ nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0), inputs_embeds, nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0), ], dim=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MobileBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.true_hidden_size, self.all_head_size) self.key = nn.Linear(config.true_hidden_size, self.all_head_size) self.value = nn.Linear( config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, ) -> tuple[torch.Tensor]: batch_size, seq_length, _ = query_tensor.shape query_layer = ( self.query(query_tensor) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(key_tensor) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(value_tensor) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MobileBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor: layer_outputs = self.dense(hidden_states) if not self.use_bottleneck: layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MobileBertSelfAttention(config) self.output = MobileBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, query_tensor: torch.Tensor, key_tensor: torch.Tensor, value_tensor: torch.Tensor, layer_input: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, ) -> tuple[torch.Tensor]: self_outputs = self.self( query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, ) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
attention_output = self.output(self_outputs[0], layer_input) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class MobileBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class OutputBottleneck(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor: layer_outputs = self.dense(hidden_states) layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) else: self.bottleneck = OutputBottleneck(config) def forward( self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor ) -> torch.Tensor: layer_output = self.dense(intermediate_states) if not self.use_bottleneck: layer_output = self.dropout(layer_output) layer_output = self.LayerNorm(layer_output + residual_tensor_1) else: layer_output = self.LayerNorm(layer_output + residual_tensor_1) layer_output = self.bottleneck(layer_output, residual_tensor_2) return layer_output class BottleneckLayer(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size) self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: layer_input = self.dense(hidden_states) layer_input = self.LayerNorm(layer_input) return layer_input class Bottleneck(nn.Module): def __init__(self, config): super().__init__() self.key_query_shared_bottleneck = config.key_query_shared_bottleneck self.use_bottleneck_attention = config.use_bottleneck_attention self.input = BottleneckLayer(config) if self.key_query_shared_bottleneck: self.attention = BottleneckLayer(config) def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor]: # This method can return three different tuples of values. These different values make use of bottlenecks, # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory # usage. These linear layer have weights that are learned during training. # # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the # key, query, value, and "layer input" to be used by the attention layer. # This bottleneck is used to project the hidden. 
This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. # # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) class FFNOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor: layer_outputs = self.dense(hidden_states) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class FFNLayer(nn.Module): def __init__(self, config): super().__init__() self.intermediate = MobileBertIntermediate(config) self.output = FFNOutput(config) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: intermediate_output = self.intermediate(hidden_states) layer_outputs = self.output(intermediate_output, hidden_states) return layer_outputs class MobileBertLayer(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = MobileBertAttention(config) self.intermediate = MobileBertIntermediate(config) self.output = MobileBertOutput(config) if self.use_bottleneck: self.bottleneck = Bottleneck(config) if config.num_feedforward_networks > 1: self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)]) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, ) -> tuple[torch.Tensor]: if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 self_attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] s = (attention_output,) outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output, hidden_states) outputs = ( (layer_output,) + outputs + ( torch.tensor(1000), query_tensor, key_tensor, value_tensor, layer_input, 
attention_output, intermediate_output, ) + s ) return outputs class MobileBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class MobileBertPooler(nn.Module): def __init__(self, config): super().__init__() self.do_activate = config.classifier_activation if self.do_activate: self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) pooled_output = torch.tanh(pooled_output) return pooled_output class MobileBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MobileBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MobileBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False) self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self) -> None: self.decoder.bias = self.bias def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.transform(hidden_states) hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0)) hidden_states += self.decoder.bias return hidden_states class MobileBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class MobileBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> tuple[torch.Tensor]: prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score @auto_docstring class MobileBertPreTrainedModel(PreTrainedModel): config: MobileBertConfig load_tf_weights = load_tf_weights_in_mobilebert base_model_prefix = "mobilebert" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, NoNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, MobileBertLMPredictionHead): module.bias.data.zero_() @dataclass @auto_docstring( custom_intro=""" Output type of [`MobileBertForPreTraining`]. """ ) class MobileBertForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). 
""" loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None seq_relationship_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring class MobileBertModel(MobileBertPreTrainedModel): """ https://huggingface.co/papers/2004.02984 """ def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = MobileBertEmbeddings(config) self.encoder = MobileBertEncoder(config) self.pooler = MobileBertPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. """ ) class MobileBertForPreTraining(MobileBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: # resize dense output embedings at first self.cls.predictions.dense = self._get_resized_lm_head( self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True ) return super().resize_token_embeddings(new_num_tokens=new_num_tokens) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, next_sentence_label: Optional[torch.LongTensor] = None, output_attentions: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[torch.FloatTensor] = None, return_dict: Optional[torch.FloatTensor] = None, ) -> Union[tuple, MobileBertForPreTrainingOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Examples: ```python >>> from transformers import AutoTokenizer, MobileBertForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased") >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) >>> # Batch size 1 >>> outputs = model(input_ids) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return MobileBertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class MobileBertForMaskedLM(MobileBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.cls = MobileBertOnlyMLMHead(config) self.config = config # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: # resize dense output embedings at first self.cls.predictions.dense = self._get_resized_lm_head( self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True ) return super().resize_token_embeddings(new_num_tokens=new_num_tokens) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the 
masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MobileBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output: torch.Tensor) -> torch.Tensor: seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score @auto_docstring( custom_intro=""" MobileBert Model with a `next sentence prediction (classification)` head on top. """ ) class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple, NextSentencePredictorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`. - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Examples: ```python >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased") >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> loss = outputs.loss >>> logits = outputs.logits ```""" if "next_sentence_label" in kwargs: warnings.warn( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" " `labels` instead.", FutureWarning, ) labels = kwargs.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_score,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """ ) # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing class MobileBertForSequenceClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.mobilebert = MobileBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
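        Example (illustrative usage, mirroring the other docstring examples in this file):

        ```python
        >>> from transformers import AutoTokenizer, MobileBertForSequenceClassification
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
        >>> model = MobileBertForSequenceClassification.from_pretrained("google/mobilebert-uncased", num_labels=2)

        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits
        >>> logits.shape
        torch.Size([1, 2])
        ```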
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing class MobileBertForQuestionAnswering(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we 
ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing class MobileBertForMultipleChoice(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing class MobileBertForTokenClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ]
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/modernbert/modular_modernbert.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_modernbert.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2024 Answer.AI, LightOn, and contributors, and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import math from contextlib import nullcontext from typing import Optional, Union import torch import torch.nn.functional as F from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, is_flash_attn_2_available, logging from ...utils.import_utils import is_triton_available from .configuration_modernbert import ModernBertConfig if is_flash_attn_2_available(): from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func from flash_attn.layers.rotary import RotaryEmbedding from flash_attn.ops.triton.rotary import apply_rotary else: RotaryEmbedding = object logger = logging.get_logger(__name__) class ApplyRotaryEmbUnpad(torch.autograd.Function): @staticmethod def forward( ctx, qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, ): # (total_nnz, 3, nheads, headdim) qkv = qkv.contiguous() total_nnz, _three, _nheads, headdim = qkv.shape # We need qkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor # qk = rearrange(qkv[:, :2], "b_s t h d -> b_s (t h) d") qk = qkv[:, :2].view(total_nnz, -1, headdim) apply_rotary( qk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, interleaved=False, inplace=True, ) ctx.save_for_backward(cos, sin, cu_seqlens) ctx.max_seqlen = max_seqlen return qkv @staticmethod def backward(ctx, do): cos, sin, cu_seqlens = ctx.saved_tensors do = do.contiguous() total_nnz, _three, _nheads, headdim = do.shape # We need dqkv to be contiguous so that when we reshape to combine (3, nheads) dimensions, # we get the same tensor dqk = do[:, :2].view(total_nnz, -1, headdim) apply_rotary( dqk, cos, sin, seqlen_offsets=0, cu_seqlens=cu_seqlens, max_seqlen=ctx.max_seqlen, interleaved=False, inplace=True, conjugate=True, ) return do, None, None, None, None, None, None def apply_rotary_unpadded( qkv, cos, sin, cu_seqlens: Optional[torch.Tensor] = None, 
max_seqlen: Optional[int] = None, ): """ Arguments: qkv: (total_nnz, 3, nheads, headdim) - input tensor for packed QKV. cos, sin: (seqlen_rotary, rotary_dim / 2) interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of 1st half and 2nd half (GPT-NeoX style). inplace: if True, apply rotary embedding in-place. seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount. Most commonly used in inference when we have KV cache. cu_seqlens: (batch + 1,) or None max_seqlen: int Return: out: (total_nnz, dim) rotary_dim must be <= headdim Apply rotary embedding to the first rotary_dim of x. """ return ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, max_seqlen) class ModernBertUnpaddedRotaryEmbedding(RotaryEmbedding): """ The rotary position embeddings applied directly to unpadded sequences. """ def __init__( self, dim: int, base: float = 10000.0, max_seqlen: Optional[int] = None, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ): """ max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ, the cos_sin_cache will be recomputed during the forward pass. """ super().__init__(dim=dim, base=base, device=device, interleaved=False) self.max_seqlen = max_seqlen if max_seqlen is not None and device is not None and dtype is not None: self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype) def forward( self, qkv: torch.Tensor, cu_seqlens: torch.Tensor, max_seqlen: Optional[int] = None, ) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]: """ Apply rotary embedding *inplace* to qkv. qkv: (total_nnz, 3, nheads, headdim) cu_seqlens: (batch + 1,) cumulative sequence lengths max_seqlen: int max seq length in the batch """ if max_seqlen is not None: self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype) qkv = apply_rotary_unpadded( qkv, self._cos_cached, self._sin_cached, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, ) return qkv def extra_repr(self) -> str: return f"dim={self.dim}, base={self.base}, scale_base={self.scale_base}" class ModernBertEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.drop = nn.Dropout(config.embedding_dropout) @torch.compile(dynamic=True) def compiled_embeddings(self, input_ids: torch.LongTensor) -> torch.Tensor: return self.drop(self.norm(self.tok_embeddings(input_ids))) def forward( self, input_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None ) -> torch.Tensor: if inputs_embeds is not None: hidden_states = self.drop(self.norm(inputs_embeds)) else: hidden_states = ( self.compiled_embeddings(input_ids) if self.config.reference_compile else self.drop(self.norm(self.tok_embeddings(input_ids))) ) return hidden_states class ModernBertMLP(nn.Module): """Applies the GLU at the end of each ModernBERT layer. Compared to the default BERT architecture, this block replaces :class:`~transformers.model.bert.modeling_bert.BertIntermediate` and :class:`~transformers.model.bert.modeling_bert.SelfOutput` with a single module that has similar functionality. 
""" def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.Wi = nn.Linear(config.hidden_size, int(config.intermediate_size) * 2, bias=config.mlp_bias) self.act = ACT2FN[config.hidden_activation] self.drop = nn.Dropout(config.mlp_dropout) self.Wo = nn.Linear(config.intermediate_size, config.hidden_size, bias=config.mlp_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: input, gate = self.Wi(hidden_states).chunk(2, dim=-1) return self.Wo(self.drop(self.act(input) * gate)) class ModernBertRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: ModernBertConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def eager_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, output_attentions: Optional[bool] = False, **_kwargs, ) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) scale = module.head_dim**-0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scale if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=module.attention_dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bs, -1, dim) if output_attentions: return (attn_output, attn_weights) return (attn_output,) def flash_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, rotary_emb: ModernBertUnpaddedRotaryEmbedding, cu_seqlens: torch.Tensor, max_seqlen: int, local_attention: tuple[int, int], bs: int, dim: int, target_dtype: torch.dtype = torch.bfloat16, **_kwargs, ) -> tuple[torch.Tensor]: # (total_seqlen, 3, nheads, headdim) qkv = rotary_emb(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen) convert_dtype = qkv.dtype not in (torch.float16, torch.bfloat16) if convert_dtype: # FA2 implementation only supports fp16 and bf16. If FA2 is supported, # bfloat16 must be supported as of FA2 2.5.7. 
(Turing GPUs not supported) orig_dtype = qkv.dtype qkv = qkv.to(target_dtype) attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) attn = attn.to(orig_dtype) # type: ignore else: attn = flash_attn_varlen_qkvpacked_func( qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, dropout_p=module.attention_dropout if module.training else 0.0, deterministic=module.deterministic_flash_attn, window_size=local_attention, ) return (attn.view(bs, dim),) def sdpa_attention_forward( module: "ModernBertAttention", qkv: torch.Tensor, attention_mask: torch.Tensor, sliding_window_mask: torch.Tensor, position_ids: Optional[torch.LongTensor], local_attention: tuple[int, int], bs: int, dim: int, **_kwargs, ) -> tuple[torch.Tensor]: # qkv: [batch_size, seqlen, 3, nheads, headdim] cos, sin = module.rotary_emb(qkv, position_ids=position_ids) query, key, value = qkv.transpose(3, 1).unbind(dim=2) # query, key, value: [batch_size, heads, seq_len, head_dim] query, key = apply_rotary_pos_emb(query, key, cos, sin) if local_attention != (-1, -1): attention_mask = sliding_window_mask attn_output = ( F.scaled_dot_product_attention( query, key, value, dropout_p=module.attention_dropout if module.training else 0.0, attn_mask=attention_mask, ) .transpose(1, 2) .contiguous() ) attn_output = attn_output.view(bs, -1, dim) return (attn_output,) MODERNBERT_ATTENTION_FUNCTION = { "flash_attention_2": flash_attention_forward, "eager": eager_attention_forward, "sdpa": sdpa_attention_forward, } class ModernBertAttention(nn.Module): """Performs multi-headed self attention on a batch of unpadded sequences. If Flash Attention 2 is installed, this module uses Flash Attention to improve throughput. If Flash Attention 2 is not installed, the implementation will use PyTorch's SDPA kernel, which requires padding and unpadding inputs, adding some overhead. See `forward` method for additional details. 
""" def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config self.layer_id = layer_id if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})" ) self.attention_dropout = config.attention_dropout self.deterministic_flash_attn = config.deterministic_flash_attn self.num_heads = config.num_attention_heads self.head_dim = config.hidden_size // config.num_attention_heads self.all_head_size = self.head_dim * self.num_heads self.Wqkv = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=config.attention_bias) if layer_id % config.global_attn_every_n_layers != 0: self.local_attention = (config.local_attention // 2, config.local_attention // 2) rope_theta = config.local_rope_theta if config.local_rope_theta is not None else config.global_rope_theta max_position_embeddings = config.local_attention else: self.local_attention = (-1, -1) max_position_embeddings = config.max_position_embeddings rope_theta = config.global_rope_theta if config._attn_implementation == "flash_attention_2": self.rotary_emb = ModernBertUnpaddedRotaryEmbedding( dim=self.head_dim, max_seqlen=max_position_embeddings, base=rope_theta ) else: config_copy = copy.deepcopy(config) config_copy.rope_theta = rope_theta self.rotary_emb = ModernBertRotaryEmbedding(config=config_copy) self.Wo = nn.Linear(config.hidden_size, config.hidden_size, bias=config.attention_bias) self.out_drop = nn.Dropout(config.attention_dropout) if config.attention_dropout > 0.0 else nn.Identity() self.pruned_heads = set() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, **kwargs, ) -> torch.Tensor: qkv = self.Wqkv(hidden_states) bs = hidden_states.shape[0] if self.config._attn_implementation == "flash_attention_2": qkv = qkv.view(-1, 3, self.num_heads, self.head_dim) else: qkv = qkv.view(bs, -1, 3, self.num_heads, self.head_dim) attn_outputs = MODERNBERT_ATTENTION_FUNCTION[self.config._attn_implementation]( self, qkv=qkv, rotary_emb=self.rotary_emb, local_attention=self.local_attention, bs=bs, dim=self.all_head_size, output_attentions=output_attentions, **kwargs, ) hidden_states = attn_outputs[0] hidden_states = self.out_drop(self.Wo(hidden_states)) return (hidden_states,) + attn_outputs[1:] # add attentions if outputted class ModernBertEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None): super().__init__() self.config = config if layer_id == 0: self.attn_norm = nn.Identity() else: self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.attn = ModernBertAttention(config=config, layer_id=layer_id) self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.mlp = ModernBertMLP(config) @torch.compile(dynamic=True) def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.mlp(self.mlp_norm(hidden_states)) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, output_attentions: Optional[bool] = False, ) -> torch.Tensor: attn_outputs = self.attn( self.attn_norm(hidden_states), attention_mask=attention_mask, 
sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions, ) hidden_states = hidden_states + attn_outputs[0] mlp_output = ( self.compiled_mlp(hidden_states) if self.config.reference_compile else self.mlp(self.mlp_norm(hidden_states)) ) hidden_states = hidden_states + mlp_output return (hidden_states,) + attn_outputs[1:] # add attentions if outputted @auto_docstring class ModernBertPreTrainedModel(PreTrainedModel): config: ModernBertConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["ModernBertEmbeddings", "ModernBertEncoderLayer"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = False def _init_weights(self, module: nn.Module): cutoff_factor = self.config.initializer_cutoff_factor if cutoff_factor is None: cutoff_factor = 3 def init_weight(module: nn.Module, std: float): nn.init.trunc_normal_( module.weight, mean=0.0, std=std, a=-cutoff_factor * std, b=cutoff_factor * std, ) if isinstance(module, nn.Linear): if module.bias is not None: nn.init.zeros_(module.bias) stds = { "in": self.config.initializer_range, "out": self.config.initializer_range / math.sqrt(2.0 * self.config.num_hidden_layers), "embedding": self.config.initializer_range, "final_out": self.config.hidden_size**-0.5, } if isinstance(module, ModernBertEmbeddings): init_weight(module.tok_embeddings, stds["embedding"]) elif isinstance(module, ModernBertMLP): init_weight(module.Wi, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertAttention): init_weight(module.Wqkv, stds["in"]) init_weight(module.Wo, stds["out"]) elif isinstance(module, ModernBertPredictionHead): init_weight(module.dense, stds["out"]) elif isinstance(module, ModernBertForMaskedLM): init_weight(module.decoder, stds["out"]) elif isinstance( module, ( ModernBertForSequenceClassification, ModernBertForMultipleChoice, ModernBertForTokenClassification, ModernBertForQuestionAnswering, ), ): init_weight(module.classifier, stds["final_out"]) elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) if module.bias is not None: module.bias.data.zero_() def _check_and_adjust_attn_implementation( self, attn_implementation: Optional[str], is_init_check: bool = False ) -> str: """ Checks and dispatches to hhe requested attention implementation. """ # If the user didn't specify anything, try to use flash_attention_2 if available. # Otherwise we fall back to the default SDPA -> Eager from the super() method. # ModernBert's FA2 implementation correctly handles non-fp16/bf16 dtypes, we don't # need the FA2 warning for non-fp16/bf16 dtypes so we set fp16 for the FA2 check. try: attn_implementation = ( "flash_attention_2" if attn_implementation is None and self._flash_attn_2_can_dispatch() else attn_implementation ) except (ValueError, ImportError): pass return super()._check_and_adjust_attn_implementation( attn_implementation=attn_implementation, is_init_check=is_init_check ) def _maybe_set_compile(self): if self.config.reference_compile is False: return if hasattr(self, "hf_device_map") and len(self.hf_device_map) > 1: if self.config.reference_compile: logger.warning_once( "If `accelerate` split the model across devices, `torch.compile` will not work. " "Falling back to non-compiled mode." 
) self.config.reference_compile = False if self.device.type == "mps": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.mps` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.device.type == "cpu": if self.config.reference_compile: logger.warning_once( "Compiling the model with `torch.compile` and using a `torch.cpu` device is not supported. " "Falling back to non-compiled mode." ) self.config.reference_compile = False if self.config.reference_compile is None: self.config.reference_compile = is_triton_available() def resize_token_embeddings(self, *args, **kwargs): model_embeds = super().resize_token_embeddings(*args, **kwargs) if self.config.reference_compile in {True, None}: if self.config.reference_compile: logger.warning_once( "Resizing token embeddings with `torch.compile` is not supported. Falling back to non-compiled mode." ) self.config.reference_compile = False return model_embeds def _unpad_modernbert_input( inputs: torch.Tensor, attention_mask: torch.Tensor, position_ids: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[torch.Tensor], Optional[torch.Tensor]]: """ Remove padding from input sequences. Args: inputs: (batch, seqlen, ...) or (batch, seqlen) attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid. position_ids: (batch, seqlen), int, position ids labels: (batch, seqlen), int, labels Returns: unpadded_inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) cu_seqlens: (batch + 1), the cumulative sequence lengths max_seqlen_in_batch: int unpadded_position_ids: (total_nnz) or None unpadded_labels: (total_nnz) or None """ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = int(seqlens_in_batch.max().item()) cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) if inputs.dim() == 2: unpadded_inputs = inputs.flatten()[indices] else: batch, seqlen, *rest = inputs.shape shape = batch * seqlen unpadded_inputs = inputs.view(shape, *rest)[indices] unpadded_position_ids = position_ids.flatten()[indices] if position_ids is not None else None unpadded_labels = labels.flatten()[indices] if labels is not None else None return unpadded_inputs, indices, cu_seqlens, max_seqlen_in_batch, unpadded_position_ids, unpadded_labels def _pad_modernbert_output( inputs: torch.Tensor, indices: torch.Tensor, batch: int, seqlen: int, ) -> torch.Tensor: """ Add padding to sequences. Args: inputs: (total_nnz, ...) or (total_nnz,), where total_nnz = number of tokens selected in attention_mask. indices: (total_nnz) batch: int, batch size seqlen: int, max sequence length Returns: padded_inputs: (batch, seqlen, ...) 
or (batch, seqlen) """ if inputs.dim() == 1: output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen) else: _, *rest = inputs.shape output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device) output[indices] = inputs padded_inputs = output.view(batch, seqlen, *rest) return padded_inputs @auto_docstring class ModernBertModel(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.embeddings = ModernBertEmbeddings(config) self.layers = nn.ModuleList( [ModernBertEncoderLayer(config, layer_id) for layer_id in range(config.num_hidden_layers)] ) self.final_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) self.gradient_checkpointing = False self.post_init() def get_input_embeddings(self): return self.embeddings.tok_embeddings def set_input_embeddings(self, value): self.embeddings.tok_embeddings = value @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor, ...], BaseModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
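        Example (a minimal sketch, assuming the `answerdotai/ModernBERT-base` checkpoint):

        ```python
        >>> from transformers import AutoTokenizer, ModernBertModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
        >>> model = ModernBertModel.from_pretrained("answerdotai/ModernBERT-base")

        >>> inputs = tokenizer("Paris is the capital of France.", return_tensors="pt")
        >>> with torch.no_grad():
        ...     last_hidden_state = model(**inputs).last_hidden_state
        ```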
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None self._maybe_set_compile() if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) repad = False if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: repad = True if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, *_ = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask ) else: if position_ids is None: position_ids = torch.arange(seq_len, device=device).unsqueeze(0) attention_mask, sliding_window_mask = self._update_attention_mask( attention_mask, output_attentions=output_attentions ) hidden_states = self.embeddings(input_ids=input_ids, inputs_embeds=inputs_embeds) for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions and len(layer_outputs) > 1: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) hidden_states = self.final_norm(hidden_states) if repad: hidden_states = _pad_modernbert_output( inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_len ) if all_hidden_states is not None: all_hidden_states = tuple( _pad_modernbert_output(inputs=hs, indices=indices, batch=batch_size, seqlen=seq_len) for hs in all_hidden_states ) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) def _update_attention_mask(self, attention_mask: torch.Tensor, output_attentions: bool) -> torch.Tensor: if output_attentions: if self.config._attn_implementation == "sdpa": logger.warning_once( "Outputting attentions is only supported with the 'eager' attention implementation, " 'not with "sdpa". Falling back to `attn_implementation="eager"`.' 
) self.config._attn_implementation = "eager" elif self.config._attn_implementation != "eager": logger.warning_once( "Outputting attentions is only supported with the eager attention implementation, " f'not with {self.config._attn_implementation}. Consider setting `attn_implementation="eager"`.' " Setting `output_attentions=False`." ) global_attention_mask = _prepare_4d_attention_mask(attention_mask, self.dtype) # Create position indices rows = torch.arange(global_attention_mask.shape[2]).unsqueeze(0) # Calculate distance between positions distance = torch.abs(rows - rows.T) # Create sliding window mask (1 for positions within window, 0 outside) window_mask = ( (distance <= self.config.local_attention // 2).unsqueeze(0).unsqueeze(0).to(attention_mask.device) ) # Combine with existing mask sliding_window_mask = global_attention_mask.masked_fill(window_mask.logical_not(), torch.finfo(self.dtype).min) return global_attention_mask, sliding_window_mask class ModernBertPredictionHead(nn.Module): def __init__(self, config: ModernBertConfig): super().__init__() self.config = config self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias) self.act = ACT2FN[config.classifier_activation] self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return self.norm(self.act(self.dense(hidden_states))) @auto_docstring( custom_intro=""" The ModernBert Model with a decoder head on top that is used for masked language modeling. """ ) class ModernBertForMaskedLM(ModernBertPreTrainedModel): _tied_weights_keys = ["decoder.weight"] def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=config.decoder_bias) self.sparse_prediction = self.config.sparse_prediction self.sparse_pred_ignore_index = self.config.sparse_pred_ignore_index # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, new_embeddings: nn.Linear): self.decoder = new_embeddings @torch.compile(dynamic=True) def compiled_head(self, output: torch.Tensor) -> torch.Tensor: return self.decoder(self.head(output)) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], MaskedLMOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. 
indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() if self.config._attn_implementation == "flash_attention_2": if indices is None and cu_seqlens is None and max_seqlen is None: if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) if inputs_embeds is None: with torch.no_grad(): input_ids, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=input_ids, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) else: inputs_embeds, indices, cu_seqlens, max_seqlen, position_ids, labels = _unpad_modernbert_input( inputs=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, labels=labels ) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.sparse_prediction and labels is not None: # flatten labels and output first labels = labels.view(-1) last_hidden_state = last_hidden_state.view(labels.shape[0], -1) # then filter out the non-masked tokens mask_tokens = labels != self.sparse_pred_ignore_index last_hidden_state = last_hidden_state[mask_tokens] labels = labels[mask_tokens] logits = ( self.compiled_head(last_hidden_state) if self.config.reference_compile else self.decoder(self.head(last_hidden_state)) ) loss = None if labels is not None: loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs) if self.config._attn_implementation == "flash_attention_2": with nullcontext() if self.config.repad_logits_with_grad or labels is None else torch.no_grad(): logits = _pad_modernbert_output(inputs=logits, indices=indices, batch=batch_size, seqlen=seq_len) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return MaskedLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a sequence classification head on top that performs pooling. 
""" ) class ModernBertForSequenceClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() if input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) if batch_size is None and seq_len is None: if inputs_embeds is not None: batch_size, seq_len = inputs_embeds.shape[:2] else: batch_size, seq_len = input_ids.shape[:2] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones((batch_size, seq_len), device=device, dtype=torch.bool) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.config.classifier_pooling == "cls": last_hidden_state = last_hidden_state[:, 0] elif self.config.classifier_pooling == "mean": last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( dim=1, keepdim=True ) pooled_output = self.head(last_hidden_state) pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a token classification head on top, e.g. for Named Entity Recognition (NER) tasks. 
""" ) class ModernBertForTokenClassification(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.head(last_hidden_state) last_hidden_state = self.drop(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class ModernBertForQuestionAnswering(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.num_labels = config.num_labels self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict self._maybe_set_compile() outputs = self.model( input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.head(last_hidden_state) last_hidden_state = self.drop(last_hidden_state) logits = self.classifier(last_hidden_state) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() loss = None if start_positions is not None and end_positions is not None: loss = self.loss_function(start_logits, end_logits, start_positions, end_positions, **kwargs) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return QuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The ModernBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """ ) class ModernBertForMultipleChoice(ModernBertPreTrainedModel): def __init__(self, config: ModernBertConfig): super().__init__(config) self.config = config self.model = ModernBertModel(config) self.head = ModernBertPredictionHead(config) self.drop = torch.nn.Dropout(config.classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, sliding_window_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, batch_size: Optional[int] = None, seq_len: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" sliding_window_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding or far-away tokens. In ModernBert, only every few layers perform global attention, while the rest perform local attention. This mask is used to avoid attending to far-away tokens in the local attention layers when not using Flash Attention. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. indices (`torch.Tensor` of shape `(total_unpadded_tokens,)`, *optional*): Indices of the non-padding tokens in the input sequence. Used for unpadding the output. cu_seqlens (`torch.Tensor` of shape `(batch + 1,)`, *optional*): Cumulative sequence lengths of the input sequences. Used to index the unpadded tensors. 
max_seqlen (`int`, *optional*): Maximum sequence length in the batch excluding padding tokens. Used to unpad input_ids and pad output tensors. batch_size (`int`, *optional*): Batch size of the input sequences. Used to pad the output tensors. seq_len (`int`, *optional*): Sequence length of the input sequences including padding tokens. Used to pad the output tensors. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) self._maybe_set_compile() outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, sliding_window_mask=sliding_window_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, indices=indices, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen, batch_size=batch_size, seq_len=seq_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] if self.config.classifier_pooling == "cls": last_hidden_state = last_hidden_state[:, 0] elif self.config.classifier_pooling == "mean": last_hidden_state = (last_hidden_state * attention_mask.unsqueeze(-1)).sum(dim=1) / attention_mask.sum( dim=1, keepdim=True ) pooled_output = self.head(last_hidden_state) pooled_output = self.drop(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "ModernBertModel", "ModernBertPreTrainedModel", "ModernBertForMaskedLM", "ModernBertForSequenceClassification", "ModernBertForTokenClassification", "ModernBertForQuestionAnswering", "ModernBertForMultipleChoice", ]
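# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch of
# masked-language-modeling inference with `ModernBertForMaskedLM`. The
# checkpoint name "answerdotai/ModernBERT-base" is an assumption here; any
# compatible ModernBERT checkpoint can be substituted. The `__main__` guard
# keeps importing this module side-effect free.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("answerdotai/ModernBERT-base")
    model = ModernBertForMaskedLM.from_pretrained("answerdotai/ModernBERT-base")
    model.eval()

    text = f"The capital of France is {tokenizer.mask_token}."
    inputs = tokenizer(text, return_tensors="pt")

    with torch.no_grad():
        logits = model(**inputs).logits

    # Take the highest-scoring vocabulary entry at the [MASK] position.
    mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    predicted_ids = logits[0, mask_positions].argmax(dim=-1)
    print(tokenizer.decode(predicted_ids))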
transformers/src/transformers/models/modernbert/modeling_modernbert.py/0
{ "file_path": "transformers/src/transformers/models/modernbert/modeling_modernbert.py", "repo_id": "transformers", "token_count": 29112 }
514
# coding=utf-8 # Copyright 2020 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tensorflow mT5 model.""" from ...utils import logging from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model from .configuration_mt5 import MT5Config logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" class TFMT5Model(TFT5Model): r""" This class overrides [`TFT5Model`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5Model, AutoTokenizer >>> model = TFMT5Model.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, return_tensors="tf") >>> labels = tokenizer(text_target=summary, return_tensors="tf") >>> outputs = model(input_ids=inputs["input_ids"], decoder_input_ids=labels["input_ids"]) >>> hidden_states = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration): r""" This class overrides [`TFT5ForConditionalGeneration`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5ForConditionalGeneration, AutoTokenizer >>> model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> summary = "Weiter Verhandlung in Syrien." >>> inputs = tokenizer(article, text_target=summary, return_tensors="tf") >>> outputs = model(**inputs) >>> loss = outputs.loss ```""" model_type = "mt5" config_class = MT5Config class TFMT5EncoderModel(TFT5EncoderModel): r""" This class overrides [`TFT5EncoderModel`]. Please check the superclass for the appropriate documentation alongside usage examples. Examples: ```python >>> from transformers import TFMT5EncoderModel, AutoTokenizer >>> model = TFMT5EncoderModel.from_pretrained("google/mt5-small") >>> tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") >>> article = "UN Offizier sagt, dass weiter verhandelt werden muss in Syrien." >>> input_ids = tokenizer(article, return_tensors="tf").input_ids >>> outputs = model(input_ids) >>> hidden_state = outputs.last_hidden_state ```""" model_type = "mt5" config_class = MT5Config __all__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
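# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch that
# builds a randomly initialized TFMT5Model from an MT5Config, showing that the
# classes above only swap in `MT5Config` on top of the shared TF T5 code. The
# tiny hyperparameter values below are made up purely for this example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tensorflow as tf

    config = MT5Config(vocab_size=256, d_model=32, d_kv=16, d_ff=64, num_layers=2, num_heads=2)
    model = TFMT5Model(config)
    assert model.config_class is MT5Config  # only the config class differs from TFT5Model

    input_ids = tf.constant([[5, 9, 3, 1]])
    decoder_input_ids = tf.constant([[0, 5, 9, 3]])
    outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
    print(outputs.last_hidden_state.shape)  # (1, 4, 32)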
transformers/src/transformers/models/mt5/modeling_tf_mt5.py/0
{ "file_path": "transformers/src/transformers/models/mt5/modeling_tf_mt5.py", "repo_id": "transformers", "token_count": 1132 }
515
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch MVP model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_mvp import MvpConfig logger = logging.get_logger(__name__) # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->Mvp class MvpLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # Mvp is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
        # Other models don't have this hack
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: torch.Tensor = None):
        """`input_ids' shape is expected to be [bsz x seqlen]."""

        if position_ids is None:
            bsz, seq_len = input_ids.shape[:2]
            position_ids = torch.arange(
                past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
            ).expand(bsz, -1)
        else:
            position_ids = position_ids.unsqueeze(0)

        return super().forward(position_ids + self.offset)


class MvpAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: Optional[float] = 0.0,
        is_decoder: Optional[bool] = False,
        bias: Optional[bool] = True,
        layer_idx: Optional[bool] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.layer_idx = layer_idx

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        attn_prompt: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling

        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_value = past_key_values.cross_attention_cache
                else:
                    curr_past_key_value = past_key_values.self_attention_cache
            else:
                curr_past_key_value = past_key_values

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_value.layers[self.layer_idx].keys
            value_states = curr_past_key_value.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
            value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)

            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = \
curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True if attn_prompt is not None: key_states = torch.cat([attn_prompt[0].expand(bsz, -1, -1, -1), key_states], dim=2) value_states = torch.cat([attn_prompt[1].expand(bsz, -1, -1, -1), value_states], dim=2) if attention_mask is not None: prompt_mask = torch.zeros(bsz, 1, tgt_len, attn_prompt[0].size(1)).to(attention_mask.device) attention_mask = torch.cat([prompt_mask, attention_mask], dim=(-1)) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class MvpEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = MvpAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, self_attn_prompt: torch.FloatTensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, encoder_attention_heads, pro_len, head_dim)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, attn_prompt=self_attn_prompt, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) return hidden_states, attn_weights class MvpDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: MvpConfig, layer_idx=None): super().__init__() self.embed_dim = config.d_model self.self_attn = MvpAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = MvpAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) 
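        # Post-norm transformer decoder layer: self-attention, cross-attention over the
        # encoder states, and a feed-forward block, each followed by its own LayerNorm.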
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, self_attn_prompt: Optional[torch.Tensor] = None, cross_attn_prompt: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape `(2, decoder_attention_heads, pro_len, head_dim)`. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, attn_prompt=self_attn_prompt, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, attn_prompt=cross_attn_prompt, past_key_values=past_key_values, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MVP class MvpClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class MvpPrompt(nn.Module): """Layer-wise prompt for encoder or decoder.""" def __init__(self, config, num_layers, num_heads): super().__init__() self.prompt_length = config.prompt_length self.num_layers = num_layers self.num_heads = num_heads self.head_dim = config.d_model // num_heads self.dropout = nn.Dropout(p=config.dropout) self.prompt_embedding = nn.Embedding(config.prompt_length, config.d_model) self.prompt_trans = nn.Sequential( nn.Linear(config.d_model, config.prompt_mid_dim), nn.GELU(), nn.Linear(config.prompt_mid_dim, num_layers * 2 * config.d_model), ) def forward(self, prompt_ids: torch.Tensor) -> tuple[torch.Tensor]: prompt = self.prompt_trans(self.prompt_embedding(prompt_ids)) prompt = prompt.view(self.prompt_length, self.num_layers * 2, self.num_heads, self.head_dim) prompt = self.dropout(prompt) prompt = prompt.permute([1, 2, 0, 3]).split(2) return prompt @auto_docstring class MvpPreTrainedModel(PreTrainedModel): config: MvpConfig base_model_prefix = "model" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): 
module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs class MvpEncoder(MvpPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`MvpEncoderLayer`]. Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt """ def __init__( self, config: MvpConfig, embed_tokens: Optional[nn.Embedding] = None, use_prompt: Optional[bool] = False ): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = MvpLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([MvpEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.use_prompt = use_prompt if use_prompt: self.prompt_length = config.prompt_length self.self_attn_prompt = MvpPrompt( config, config.encoder_layers, config.encoder_attention_heads, ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # layer-wise prompt if self.use_prompt: prompt_ids = torch.arange(self.prompt_length).to(self.device) self_attn_prompt = self.self_attn_prompt(prompt_ids) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class MvpDecoder(MvpPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`] Args: config: MvpConfig embed_tokens (nn.Embedding): output embedding use_prompt (bool): whether to use prompt """ def __init__( self, config: MvpConfig, embed_tokens: Optional[nn.Embedding] = None, use_prompt: Optional[bool] = False ): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = MvpLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([MvpDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.use_prompt = use_prompt if use_prompt: self.prompt_length = config.prompt_length self.self_attn_prompt = MvpPrompt( config, config.decoder_layers, config.decoder_attention_heads, ) self.cross_attn_prompt = MvpPrompt( config, config.decoder_layers, config.decoder_attention_heads, ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input_ids.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(), DynamicCache()) if encoder_hidden_states is not None else DynamicCache() ) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # embed positions positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # layer-wise prompt if self.use_prompt: prompt_ids = torch.arange(self.prompt_length).to(self.device) self_attn_prompt = self.self_attn_prompt(prompt_ids) cross_attn_prompt = self.cross_attn_prompt(prompt_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None), cross_attn_prompt=(cross_attn_prompt[idx] if self.use_prompt else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring class MvpModel(MvpPreTrainedModel): _keys_to_ignore_on_load_unexpected = ["final_logits_bias"] _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: MvpConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.use_prompt = config.use_prompt self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = MvpEncoder(config, self.shared, config.use_prompt) self.decoder = MvpDecoder(config, self.shared, config.use_prompt) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def set_lightweight_tuning(self): assert self.use_prompt, "If you want to use lightweight tuning, make sure that `use_prompt=True`." 
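        # Lightweight (prompt) tuning: freeze every parameter of the model, then re-enable gradients only for the
        # layer-wise self-attention and cross-attention prompt embeddings below.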
self.requires_grad_(False) self.encoder.self_attn_prompt.requires_grad_(True) self.decoder.self_attn_prompt.requires_grad_(True) self.decoder.cross_attn_prompt.requires_grad_(True) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqModelOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. """ # different to other models, Mvp automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
) decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The MVP Model with a language modeling head. Can be used for various text generation tasks. 
""" ) class MvpForConditionalGeneration(MvpPreTrainedModel, GenerationMixin): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: MvpConfig): super().__init__(config) self.model = MvpModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings( self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True ) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.lm_head.requires_grad_(False) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqLMOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. 
See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example of summarization: Fine-tuning a model ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForConditionalGeneration.from_pretrained("RUCAIBox/mvp") >>> inputs = tokenizer( ... "Summarize: You may want to stick it to your boss and leave your job, but don't do it if these are your reasons.", ... return_tensors="pt", ... ) >>> labels = tokenizer("Bad Reasons To Quit Your Job", return_tensors="pt")["input_ids"] >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... generated_ids = model.generate(**inputs) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @auto_docstring( custom_intro=""" Mvp model with 
    a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
    """
)
class MvpForSequenceClassification(MvpPreTrainedModel):
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    def __init__(self, config: MvpConfig, **kwargs):
        super().__init__(config, **kwargs)
        self.model = MvpModel(config)
        self.classification_head = MvpClassificationHead(
            config.d_model,
            config.d_model,
            config.num_labels,
            config.classifier_dropout,
        )

        # Initialize weights and apply final processing
        self.post_init()

    def set_lightweight_tuning(self):
        self.model.set_lightweight_tuning()
        self.classification_head.requires_grad_(False)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[list[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the
            right for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.

            If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
            information on the default strategy.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed
            (Cross-Entropy).
Example of single-label classification: Fine-tuning a model on `num_labels` classes ```python >>> import torch >>> from transformers import AutoTokenizer, MvpForSequenceClassification >>> num_labels = 2 # for example, this is a binary classification task >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForSequenceClassification.from_pretrained("RUCAIBox/mvp", num_labels=num_labels) >>> inputs = tokenizer("Classify: Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor(1) # the real label for inputs >>> loss = model(**inputs, labels=labels).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax() ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @auto_docstring class MvpForQuestionAnswering(MvpPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config): 
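        # Extractive QA predicts a start and an end position in the input, so the question-answering head always
        # uses two labels.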
        super().__init__(config)

        config.num_labels = 2
        self.num_labels = config.num_labels

        self.model = MvpModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def set_lightweight_tuning(self):
        self.model.set_lightweight_tuning()
        self.qa_outputs.requires_grad_(False)

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[list[torch.FloatTensor]] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, Seq2SeqQuestionAnsweringModelOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            For translation and summarization training, `decoder_input_ids` should be provided. If no
            `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the
            right for denoising pre-training following the paper.
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.

            If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
            information on the default strategy.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example: Fine-tuning a model for extractive question answering. The model also supports generative question
        answering with [`MvpForConditionalGeneration`]

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, MvpForQuestionAnswering

        >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
        >>> model = MvpForQuestionAnswering.from_pretrained("RUCAIBox/mvp")

        >>> inputs = tokenizer(
        ...     "Answer the following question: Who was Jim Henson? [SEP] Jim Henson was a nice puppet",
        ...     return_tensors="pt",
        ...
) >>> target_start_index = torch.tensor([18]) >>> target_end_index = torch.tensor([19]) >>> loss = model(**inputs, start_positions=target_start_index, end_positions=target_end_index).loss >>> loss.backward() ``` Inference after the model fine-tuned ```python >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer = tokenizer.decode(predict_answer_tokens) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Mvp class MvpDecoderWrapper(MvpPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = MvpDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class MvpForCausalLM(MvpPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = MvpDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder def set_lightweight_tuning(self): self.model.set_lightweight_tuning() self.lm_head.requires_grad_(False) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, CausalLMOutputWithCrossAttentions]: r""" cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example: ```python >>> from transformers import AutoTokenizer, MvpForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp") >>> model = MvpForCausalLM.from_pretrained("RUCAIBox/mvp", add_cross_attention=False) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> list(logits.shape) [1, 8, 50267] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) __all__ = [ "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ]
transformers/src/transformers/models/mvp/modeling_mvp.py/0
{ "file_path": "transformers/src/transformers/models/mvp/modeling_mvp.py", "repo_id": "transformers", "token_count": 36362 }
516
# coding=utf-8 # Copyright 2023 NllbMoe Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch NLLB-MoE model.""" import math from typing import Callable, Optional, Union import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( MoEModelOutput, MoEModelOutputWithPastAndCrossAttentions, Seq2SeqMoEModelOutput, Seq2SeqMoEOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_nllb_moe import NllbMoeConfig if is_torch_flex_attn_available(): from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. 
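    # Non-padding tokens are numbered padding_idx + 1, padding_idx + 2, ... in order, while padding tokens are
    # mapped back to padding_idx itself.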
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx


def load_balancing_loss_func(router_probs: torch.Tensor, expert_indices: torch.Tensor) -> float:
    r"""
    Computes auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.

    See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the
    loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        router_probs (`torch.Tensor`):
            Probability assigned to each expert per token. Shape: [batch_size, sequence_length, num_experts].
        expert_indices (`torch.Tensor`):
            Indices tensor of shape [batch_size, sequence_length] identifying the selected expert for a given token.

    Returns:
        The auxiliary loss.
    """
    if router_probs is None:
        return 0

    num_experts = router_probs.shape[-1]

    # cast the expert indices to int64, otherwise one-hot encoding will fail
    if expert_indices.dtype != torch.int64:
        expert_indices = expert_indices.to(torch.int64)

    if len(expert_indices.shape) == 2:
        expert_indices = expert_indices.unsqueeze(2)

    expert_mask = torch.nn.functional.one_hot(expert_indices, num_experts)

    # For a given token, determine if it was routed to a given expert.
    expert_mask = torch.max(expert_mask, axis=-2).values

    # cast to float32 otherwise mean will fail
    expert_mask = expert_mask.to(torch.float32)
    tokens_per_group_and_expert = torch.mean(expert_mask, axis=-2)

    router_prob_per_group_and_expert = torch.mean(router_probs, axis=-2)
    return torch.mean(tokens_per_group_and_expert * router_prob_per_group_and_expert) * (num_experts**2)


# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100ScaledWordEmbedding with M2M100->NllbMoe
class NllbMoeScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying with the embedding scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale


# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding
class NllbMoeSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
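        Concretely, position `pos` and channel `i` (for `i` in `[0, embedding_dim // 2)`) are encoded as
        `sin(pos * w_i)` and `cos(pos * w_i)` with `w_i = exp(-i * log(10000) / (embedding_dim // 2 - 1))`, and all
        sine channels are concatenated before all cosine channels instead of being interleaved.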
""" half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward( self, input_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, past_key_values_length: int = 0, ): if input_ids is not None: bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) else: bsz, seq_len = inputs_embeds.size()[:-1] position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, past_key_values_length) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len + past_key_values_length if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, self.weights.shape[-1]).detach() def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_length): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length class NllbMoeTop2Router(nn.Module): """ Router using tokens choose top-2 experts assignment. This router uses the same mechanism as in NLLB-MoE from the fairseq repository. Items are sorted by router_probs and then routed to their choice of expert until the expert's expert_capacity is reached. **There is no guarantee that each token is processed by an expert**, or that each expert receives at least one token. The router combining weights are also returned to make sure that the states that are not updated will be masked. """ def __init__(self, config: NllbMoeConfig): super().__init__() self.num_experts = config.num_experts self.expert_capacity = config.expert_capacity self.classifier = nn.Linear(config.hidden_size, self.num_experts, bias=config.router_bias) self.router_ignore_padding_tokens = config.router_ignore_padding_tokens self.dtype = getattr(torch, config.router_dtype) self.second_expert_policy = config.second_expert_policy self.normalize_router_prob_before_dropping = config.normalize_router_prob_before_dropping self.batch_prioritized_routing = config.batch_prioritized_routing self.moe_eval_capacity_token_fraction = config.moe_eval_capacity_token_fraction def _cast_classifier(self): r""" `bitsandbytes` `Linear8bitLt` layers does not support manual casting Therefore we need to check if they are an instance of the `Linear8bitLt` class by checking special attributes. 
""" if not (hasattr(self.classifier, "SCB") or hasattr(self.classifier, "CB")): self.classifier = self.classifier.to(self.dtype) def normalize_router_probabilities(self, router_probs, top_1_mask, top_2_mask): top_1_max_probs = (router_probs * top_1_mask).sum(dim=1) top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) denom_s = torch.clamp(top_1_max_probs + top_2_max_probs, min=torch.finfo(router_probs.dtype).eps) top_1_max_probs = top_1_max_probs / denom_s top_2_max_probs = top_2_max_probs / denom_s return top_1_max_probs, top_2_max_probs def route_tokens( self, router_logits: torch.Tensor, input_dtype: torch.dtype = torch.float32, padding_mask: Optional[torch.LongTensor] = None, ) -> tuple: """ Computes the `dispatch_mask` and the `dispatch_weights` for each experts. The masks are adapted to the expert capacity. """ nb_tokens = router_logits.shape[0] # Apply Softmax and cast back to the original `dtype` router_probs = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(input_dtype) top_1_expert_index = torch.argmax(router_probs, dim=-1) top_1_mask = torch.nn.functional.one_hot(top_1_expert_index, num_classes=self.num_experts) if self.second_expert_policy == "sampling": gumbel = torch.distributions.gumbel.Gumbel(0, 1).rsample router_logits += gumbel(router_logits.shape).to(router_logits.device) # replace top_1_expert_index with min values logits_except_top_1 = router_logits.masked_fill(top_1_mask.bool(), float("-inf")) top_2_expert_index = torch.argmax(logits_except_top_1, dim=-1) top_2_mask = torch.nn.functional.one_hot(top_2_expert_index, num_classes=self.num_experts) if self.normalize_router_prob_before_dropping: top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities( router_probs, top_1_mask, top_2_mask ) if self.second_expert_policy == "random": top_2_max_probs = (router_probs * top_2_mask).sum(dim=1) sampled = (2 * top_2_max_probs) > torch.rand_like(top_2_max_probs.float()) top_2_mask = top_2_mask * sampled.repeat(self.num_experts, 1).transpose(1, 0) if padding_mask is not None and not self.router_ignore_padding_tokens: if len(padding_mask.shape) == 4: # only get the last causal mask padding_mask = padding_mask[:, :, -1, :].reshape(-1)[-nb_tokens:] non_padding = ~padding_mask.bool() top_1_mask = top_1_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) top_2_mask = top_2_mask * non_padding.unsqueeze(-1).to(top_1_mask.dtype) if self.batch_prioritized_routing: # sort tokens based on their routing probability # to make sure important tokens are routed, first importance_scores = -1 * router_probs.max(dim=1)[0] sorted_top_1_mask = top_1_mask[importance_scores.argsort(dim=0)] sorted_cumsum1 = (torch.cumsum(sorted_top_1_mask, dim=0) - 1) * sorted_top_1_mask locations1 = sorted_cumsum1[importance_scores.argsort(dim=0).argsort(dim=0)] sorted_top_2_mask = top_2_mask[importance_scores.argsort(dim=0)] sorted_cumsum2 = (torch.cumsum(sorted_top_2_mask, dim=0) - 1) * sorted_top_2_mask locations2 = sorted_cumsum2[importance_scores.argsort(dim=0).argsort(dim=0)] # Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) else: locations1 = torch.cumsum(top_1_mask, dim=0) - 1 locations2 = torch.cumsum(top_2_mask, dim=0) - 1 # Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(top_1_mask, dim=0, keepdim=True) if not self.training and self.moe_eval_capacity_token_fraction > 0: self.expert_capacity = math.ceil(self.moe_eval_capacity_token_fraction * nb_tokens) else: capacity = 2 * 
math.ceil(nb_tokens / self.num_experts)
            self.expert_capacity = capacity if self.expert_capacity is None else self.expert_capacity

        # Drop routing assignments that exceed the expert capacity (tokens whose running count within an expert
        # reaches the capacity are not routed to that expert)
        top_1_mask = top_1_mask * torch.lt(locations1, self.expert_capacity)
        top_2_mask = top_2_mask * torch.lt(locations2, self.expert_capacity)

        if not self.normalize_router_prob_before_dropping:
            top_1_max_probs, top_2_max_probs = self.normalize_router_probabilities(
                router_probs, top_1_mask, top_2_mask
            )

        # Calculate combine_weights and dispatch_mask
        gates1 = top_1_max_probs[:, None] * top_1_mask
        gates2 = top_2_max_probs[:, None] * top_2_mask
        router_probs = gates1 + gates2

        return top_1_mask, router_probs

    def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.LongTensor] = None) -> tuple:
        r"""
        The hidden states are reshaped to simplify the computation of the router probabilities (combining weights for
        each expert).

        Args:
            hidden_states (`torch.Tensor`):
                (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.
        Returns:
            top_1_mask (`torch.Tensor` of shape (batch_size, sequence_length)):
                Index tensor of shape [batch_size, sequence_length] corresponding to the expert selected for each
                token using the top1 probabilities of the router.
            router_probabilities (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
                Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each
                token and expert. Used for routing tokens to experts.
            router_logits (`torch.Tensor` of shape (batch_size, sequence_length, num_experts)):
                Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.
                This is used later for computing router z-loss.
        """
        self.input_dtype = hidden_states.dtype
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim)
        hidden_states = hidden_states.to(self.dtype)
        self._cast_classifier()
        router_logits = self.classifier(hidden_states)
        top_1_mask, router_probs = self.route_tokens(router_logits, self.input_dtype, padding_mask)
        return top_1_mask, router_probs


class NllbMoeDenseActDense(nn.Module):
    def __init__(self, config: NllbMoeConfig, ffn_dim: int):
        super().__init__()
        self.fc1 = nn.Linear(config.d_model, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, config.d_model)
        self.dropout = nn.Dropout(config.activation_dropout)
        self.act = ACT2FN[config.activation_function]

    def forward(self, hidden_states):
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        if (
            isinstance(self.fc2.weight, torch.Tensor)
            and hidden_states.dtype != self.fc2.weight.dtype
            and (self.fc2.weight.dtype != torch.int8 and self.fc2.weight.dtype != torch.uint8)
        ):
            hidden_states = hidden_states.to(self.fc2.weight.dtype)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class NllbMoeSparseMLP(nn.Module):
    r"""
    Implementation of the NLLB-MoE sparse MLP module.
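    It combines a [`NllbMoeTop2Router`] with `config.num_experts` expert feed-forward networks (by default
    [`NllbMoeDenseActDense`]): each token is dispatched to at most two experts, and the expert outputs are weighted
    by the routing probabilities before being summed back into the hidden states.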
""" def __init__(self, config: NllbMoeConfig, ffn_dim: int, expert_class: nn.Module = NllbMoeDenseActDense): super().__init__() self.router = NllbMoeTop2Router(config) self.moe_token_dropout = config.moe_token_dropout self.token_dropout = nn.Dropout(self.moe_token_dropout) self.num_experts = config.num_experts self.experts = nn.ModuleDict() for idx in range(self.num_experts): self.experts[f"expert_{idx}"] = expert_class(config, ffn_dim) def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor] = False): r""" The goal of this forward pass is to have the same number of operation as the equivalent `NllbMoeDenseActDense` (mlp) layer. This means that all of the hidden states should be processed at most twice ( since we are using a top_2 gating mechanism). This means that we keep the complexity to O(batch_size x sequence_length x hidden_dim) instead of O(num_experts x batch_size x sequence_length x hidden_dim). 1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length, num_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the `router_mask`. 2- Dispatch the hidden_states to its associated experts. The router probabilities are used to weight the contribution of each experts when updating the masked hidden states. Args: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): The hidden states padding_mask (`torch.Tensor`, *optional*, defaults to `False`): Attention mask. Can be in the causal form or not. Returns: hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`): Updated hidden states router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`): Needed for computing the loss """ batch_size, sequence_length, hidden_dim = hidden_states.shape top_1_mask, router_probs = self.router(hidden_states, padding_mask) router_mask = router_probs.bool() hidden_states = hidden_states.reshape((batch_size * sequence_length), hidden_dim) masked_hidden_states = torch.einsum("bm,be->ebm", hidden_states, router_mask) for idx, expert in enumerate(self.experts.values()): token_indices = router_mask[:, idx] combining_weights = router_probs[token_indices, idx] expert_output = expert(masked_hidden_states[idx, token_indices]) if self.moe_token_dropout > 0: if self.training: expert_output = self.token_dropout(expert_output) else: expert_output *= 1 - self.moe_token_dropout masked_hidden_states[idx, token_indices] = torch.einsum("b,be->be", combining_weights, expert_output) hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim) top_1_expert_index = torch.argmax(top_1_mask, dim=-1) return hidden_states, (router_probs, top_1_expert_index) # Copied from transformers.models.bart.modeling_bart.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): if scaling is None: scaling = query.size(-1) ** -0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, 
training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenAttention with Musicgen->NllbMoe,key_value_states->encoder_hidden_states class NllbMoeAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: Optional[float] = 0.0, is_decoder: Optional[bool] = False, bias: Optional[bool] = True, is_causal: Optional[bool] = False, config: Optional[NllbMoeConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if encoder_hidden_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = encoder_hidden_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = encoder_hidden_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_layer from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = encoder_hidden_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2) value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all key/value_states to 
cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights class NllbMoeEncoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool = False): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config, ) self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.encoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.encoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, output_router_logits: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: # router_states set to None to track which layers have None gradients. 
hidden_states, router_states = self.ffn(hidden_states), None hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if output_router_logits: outputs += (router_states,) return outputs class NllbMoeDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: NllbMoeConfig, is_sparse: bool = False, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.d_model self.is_sparse = is_sparse self.self_attn = NllbMoeAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.attn_dropout = nn.Dropout(config.dropout) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.cross_attention = NllbMoeAttention( self.embed_dim, config.decoder_attention_heads, config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx, ) self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim) if not self.is_sparse: self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim) else: self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim) self.ff_layer_norm = nn.LayerNorm(config.d_model) self.ff_dropout = nn.Dropout(config.activation_dropout) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, output_router_logits: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.cross_attention_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.cross_attention( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self.attn_dropout(hidden_states) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.ff_layer_norm(hidden_states) if self.is_sparse: hidden_states, router_states = self.ffn(hidden_states, attention_mask) else: hidden_states, router_states = self.ffn(hidden_states), None hidden_states = self.ff_dropout(hidden_states) hidden_states = residual + hidden_states # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if output_router_logits: outputs += (router_states,) return outputs @auto_docstring class NllbMoePreTrainedModel(PreTrainedModel): config: NllbMoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["NllbMoeEncoderLayer", "NllbMoeDecoderLayer"] # TODO: If anyone is up to it to make sure tests pass etc # Flash attention has problems due to not preparing masks the same way as eager/sdpa # SDPA has more flaky logits which requires more time to look into tests _supports_flash_attn = False _supports_sdpa = False _supports_flex_attn = False def _init_weights(self, module: nn.Module): """Initialize the weights""" std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() class NllbMoeEncoder(NllbMoePreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`NllbMoeEncoderLayer`]. 
Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding( config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale ) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, ) sparse_step = config.encoder_sparse_step self.layers = nn.ModuleList() for i in range(config.encoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeEncoderLayer(config, is_sparse)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input_ids, inputs_embeds) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = self._update_full_mask( attention_mask, inputs_embeds, ) encoder_states = () if output_hidden_states else None all_router_probs = () if output_router_logits else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, output_router_logits=output_router_logits, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_router_logits: all_router_probs += (layer_outputs[-1],) last_hidden_state = self.layer_norm(hidden_states) if output_hidden_states: encoder_states += (last_hidden_state,) if not return_dict: return tuple( v for v in [last_hidden_state, encoder_states, all_attentions, all_router_probs] if v is not None ) return MoEModelOutput( last_hidden_state=last_hidden_state, hidden_states=encoder_states, attentions=all_attentions, router_probs=all_router_probs, ) # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_full_mask def _update_full_mask( self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. 
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask class NllbMoeDecoder(NllbMoePreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`NllbMoeDecoderLayer`] Args: config: NllbMoeConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: NllbMoeConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = NllbMoeScaledWordEmbedding( config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale ) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = NllbMoeSinusoidalPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx, ) sparse_step = config.decoder_sparse_step self.layers = nn.ModuleList() for i in range(config.decoder_layers): is_sparse = (i + 1) % sparse_step == 0 if sparse_step > 0 else False self.layers.append(NllbMoeDecoderLayer(config, is_sparse, layer_idx=i)) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = True, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. output_router_logits (`bool`, *optional*): Whether or not to return the logits of all the routers. They are useful for computing the router loss, and should not be returned during inference. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = self._update_causal_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) encoder_attention_mask = self._update_cross_attn_mask( encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds, ) # embed positions positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_router_probs = () if output_router_logits else None all_cross_attentions = () if output_attentions else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.layerdrop if not skip_the_layer or synced_gpus: layer_head_mask = head_mask[idx] if head_mask is not None else None cross_attn_layer_head_mask = cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_router_logits=output_router_logits, cache_position=cache_position, ) hidden_states = layer_outputs[0] if skip_the_layer: continue if output_attentions: all_self_attns += (layer_outputs[1],) all_cross_attentions += (layer_outputs[2],) if output_router_logits: all_router_probs += (layer_outputs[-1],) hidden_states = self.layer_norm(hidden_states) # Add last layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions, all_router_probs, ] if v is not None ) return MoEModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, router_probs=all_router_probs, ) # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int, ): if self.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, inputs_embeds, past_key_values_length, ) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) # Other attention flavors support in-built causal (when `mask is None`) # while we need to create our specific block mask regardless elif attention_mask is None: attention_mask = make_flex_block_causal_mask( torch.ones( size=(input_shape), device=inputs_embeds.device, ) ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) return attention_mask # Copied from transformers.models.musicgen.modeling_musicgen.MusicgenDecoder._update_cross_attn_mask def _update_cross_attn_mask( self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, ): # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], ) elif self.config._attn_implementation == "flex_attention": if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask( encoder_attention_mask, query_length=input_shape[-1], is_causal=False, ) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) return encoder_attention_mask @auto_docstring class NllbMoeModel(NllbMoePreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: NllbMoeConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.shared = NllbMoeScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale) self.encoder = NllbMoeEncoder(config, self.shared) self.decoder = NllbMoeDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def _tie_weights(self): if self.config.tie_word_embeddings: self._tie_or_clone_weights(self.encoder.embed_tokens, self.shared) self._tie_or_clone_weights(self.decoder.embed_tokens, self.shared) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = 
None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple[torch.Tensor], Seq2SeqMoEModelOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will
            also be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, NllbMoeModel

        >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")
        >>> model = NllbMoeModel.from_pretrained("hf-internal-testing/random-nllb-moe-2-experts")

        >>> input_ids = tokenizer(
        ...     "Studies have been shown that owning a dog is good for you", return_tensors="pt"
        ...
).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for NllbMoeModel >>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, MoEModelOutput): encoder_outputs = MoEModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, router_probs=encoder_outputs[3] if len(encoder_outputs) > 3 else None, ) # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqMoEModelOutput( past_key_values=decoder_outputs.past_key_values, cross_attentions=decoder_outputs.cross_attentions, last_hidden_state=decoder_outputs.last_hidden_state, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, decoder_hidden_states=decoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, decoder_attentions=decoder_outputs.attentions, encoder_router_logits=encoder_outputs.router_probs, decoder_router_logits=decoder_outputs.router_probs, ) @auto_docstring( custom_intro=""" The NllbMoe Model with a language modeling head. Can be used for summarization. 
""" ) class NllbMoeForConditionalGeneration(NllbMoePreTrainedModel, GenerationMixin): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: NllbMoeConfig): super().__init__(config) self.model = NllbMoeModel(config) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.router_z_loss_coef = config.router_z_loss_coef self.router_aux_loss_coef = config.router_aux_loss_coef # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_router_logits: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.Tensor], Seq2SeqMoEOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) NllbMoe uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Example Translation: ```python >>> from transformers import AutoTokenizer, NllbMoeForConditionalGeneration >>> model = NllbMoeForConditionalGeneration.from_pretrained("facebook/nllb-moe-54b") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b") >>> text_to_translate = "Life is like a box of chocolates" >>> model_inputs = tokenizer(text_to_translate, return_tensors="pt") >>> # translate to French >>> gen_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.get_lang_id("eng_Latn")) >>> print(tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)) ``` """ return_dict = return_dict if return_dict is not None else self.config.return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_router_logits = ( output_router_logits if output_router_logits is not None else self.config.output_router_logits ) if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(outputs[0]) loss = None encoder_aux_loss = None decoder_aux_loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # todo check in the config if router loss enables if output_router_logits: encoder_router_logits = outputs[-1] decoder_router_logits = outputs[3 if output_attentions else 4] # Compute the router loss (z_loss + auxiliary loss) for each router in the encoder and decoder encoder_router_logits, encoder_expert_indexes = self._unpack_router_logits(encoder_router_logits) encoder_aux_loss = load_balancing_loss_func(encoder_router_logits, encoder_expert_indexes) decoder_router_logits, decoder_expert_indexes = self._unpack_router_logits(decoder_router_logits) decoder_aux_loss = load_balancing_loss_func(decoder_router_logits, decoder_expert_indexes) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if output_router_logits and labels is not None: aux_loss = self.router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss) loss = loss + aux_loss output = (loss,) if loss is not None else () if not return_dict: output += (lm_logits,) if output_router_logits: # only return the loss if they are not None output += ( encoder_aux_loss, decoder_aux_loss, *outputs[1:], ) else: output += outputs[1:] return output return Seq2SeqMoEOutput( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, cross_attentions=outputs.cross_attentions, encoder_aux_loss=encoder_aux_loss, decoder_aux_loss=decoder_aux_loss, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, decoder_hidden_states=outputs.decoder_hidden_states, encoder_attentions=outputs.encoder_attentions, decoder_attentions=outputs.decoder_attentions, encoder_router_logits=outputs.encoder_router_logits, decoder_router_logits=outputs.decoder_router_logits, ) def 
_unpack_router_logits(self, router_outputs): total_router_logits = [] total_expert_indexes = [] for router_output in router_outputs: if router_output is not None: router_logits, expert_indexes = router_output total_router_logits.append(router_logits) total_expert_indexes.append(expert_indexes) total_router_logits = torch.cat(total_router_logits, dim=1) if len(total_router_logits) > 0 else None total_expert_indexes = torch.stack(total_expert_indexes, dim=1) if len(total_expert_indexes) > 0 else None return total_router_logits, total_expert_indexes __all__ = [ "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ]
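# Illustrative sketch (editorial, hedged; not part of the original modeling file):
# a minimal example of how `NllbMoeForConditionalGeneration` assembles its training
# loss, i.e. the token-level cross-entropy plus `router_aux_loss_coef` times the
# encoder/decoder load-balancing losses that are available when
# `output_router_logits=True`. All tensors, shapes, and the coefficient below are
# made-up stand-ins; the two auxiliary scalars stand in for the outputs of
# `load_balancing_loss_func`, which is not called here.
if __name__ == "__main__":
    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size, router_aux_loss_coef = 32, 0.001
    lm_logits = torch.randn(2, 5, vocab_size)      # (batch, target_len, vocab)
    labels = torch.randint(0, vocab_size, (2, 5))  # target token ids; -100 entries would be ignored
    encoder_aux_loss = torch.tensor(0.02)          # stand-in for the encoder load-balancing loss
    decoder_aux_loss = torch.tensor(0.03)          # stand-in for the decoder load-balancing loss

    loss_fct = CrossEntropyLoss(ignore_index=-100)
    ce_loss = loss_fct(lm_logits.view(-1, vocab_size), labels.view(-1))
    total_loss = ce_loss + router_aux_loss_coef * (encoder_aux_loss + decoder_aux_loss)
    print(f"cross-entropy: {ce_loss.item():.4f}, total with aux: {total_loss.item():.4f}")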
transformers/src/transformers/models/nllb_moe/modeling_nllb_moe.py/0
{ "file_path": "transformers/src/transformers/models/nllb_moe/modeling_nllb_moe.py", "repo_id": "transformers", "token_count": 36412 }
517
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OneFormer checkpoints from the original repository. URL: https://github.com/SHI-Labs/OneFormer""" import os import sys from argparse import ArgumentParser from collections.abc import Iterator from dataclasses import dataclass from pathlib import Path from pprint import pformat from typing import Any import requests import torch import torchvision.transforms as T from PIL import Image from torch import Tensor, nn try: from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog from detectron2.projects.deeplab import add_deeplab_config except ImportError: pass from transformers import CLIPTokenizer, DinatConfig, SwinConfig from transformers.models.oneformer.image_processing_oneformer import OneFormerImageProcessor from transformers.models.oneformer.modeling_oneformer import ( OneFormerConfig, OneFormerForUniversalSegmentation, OneFormerForUniversalSegmentationOutput, OneFormerModel, OneFormerModelOutput, ) from transformers.models.oneformer.processing_oneformer import OneFormerProcessor from transformers.utils import logging StateDict = dict[str, Tensor] logging.set_verbosity_info() logger = logging.get_logger() torch.manual_seed(0) class TrackedStateDict: def __init__(self, to_track: dict): """This class "tracks" a python dictionary by keeping track of which item is accessed. Args: to_track (Dict): The dictionary we wish to track """ self.to_track = to_track self._seen: set[str] = set() def __getitem__(self, key: str) -> Any: return self.to_track[key] def __setitem__(self, key: str, item: Any): self._seen.add(key) self.to_track[key] = item def diff(self) -> list[str]: """This method returns a set difference between the keys in the tracked state dict and the one we have access so far. 
This is an effective method to check if we have update all the keys Returns: list[str]: List of keys not yet updated """ return set(self.to_track.keys()) - self._seen def copy(self) -> dict: # proxy the call to the internal dictionary return self.to_track.copy() # Image to verify the result def prepare_img(): url = "https://praeclarumjj3.github.io/files/coco.jpeg" img_data = requests.get(url, stream=True).raw im = Image.open(img_data) return im @dataclass class Args: """Fake command line arguments needed by oneformer/detectron2 implementation""" config_file: str def setup_cfg(args: Args): # load config from file and command-line arguments cfg = get_cfg() add_deeplab_config(cfg) add_common_config(cfg) add_oneformer_config(cfg) add_swin_config(cfg) add_dinat_config(cfg) cfg.merge_from_file(args.config_file) cfg.freeze() return cfg class OriginalOneFormerConfigToOursConverter: def __call__(self, original_config: object, is_swin: bool) -> OneFormerConfig: model = original_config.MODEL dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) id2label = dict(enumerate(dataset_catalog.stuff_classes)) label2id = {label: idx for idx, label in id2label.items()} if is_swin: if model.SWIN.EMBED_DIM == 96: backbone_config = SwinConfig.from_pretrained( "microsoft/swin-tiny-patch4-window7-224", drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=["stage1", "stage2", "stage3", "stage4"], ) elif model.SWIN.EMBED_DIM == 192: backbone_config = SwinConfig.from_pretrained( "microsoft/swin-large-patch4-window12-384", drop_path_rate=model.SWIN.DROP_PATH_RATE, out_features=["stage1", "stage2", "stage3", "stage4"], ) else: raise ValueError(f"embed dim {model.SWIN.EMBED_DIM} not supported for Swin!") else: backbone_config = DinatConfig.from_pretrained( "shi-labs/dinat-large-11x11-in22k-in1k-384", dilations=model.DiNAT.DILATIONS, kernel_size=model.DiNAT.KERNEL_SIZE, out_features=["stage1", "stage2", "stage3", "stage4"], ) config: OneFormerConfig = OneFormerConfig( backbone_config=backbone_config, output_attentions=True, output_hidden_states=True, return_dict=True, ignore_value=model.SEM_SEG_HEAD.IGNORE_VALUE, num_classes=model.SEM_SEG_HEAD.NUM_CLASSES, num_queries=model.ONE_FORMER.NUM_OBJECT_QUERIES, no_object_weight=model.ONE_FORMER.NO_OBJECT_WEIGHT, class_weight=model.ONE_FORMER.CLASS_WEIGHT, mask_weight=model.ONE_FORMER.MASK_WEIGHT, dice_weight=model.ONE_FORMER.DICE_WEIGHT, contrastive_weight=model.ONE_FORMER.CONTRASTIVE_WEIGHT, contrastive_temperature=model.ONE_FORMER.CONTRASTIVE_TEMPERATURE, train_num_points=model.ONE_FORMER.TRAIN_NUM_POINTS, oversample_ratio=model.ONE_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=model.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO, init_std=0.02, init_xavier_std=1.0, layer_norm_eps=1e-05, is_training=False, use_auxiliary_loss=model.ONE_FORMER.DEEP_SUPERVISION, output_auxiliary_logits=True, strides=[4, 8, 16, 32], task_seq_len=original_config.INPUT.TASK_SEQ_LEN, max_seq_len=original_config.INPUT.MAX_SEQ_LEN, text_encoder_width=model.TEXT_ENCODER.WIDTH, text_encoder_context_length=model.TEXT_ENCODER.CONTEXT_LENGTH, text_encoder_num_layers=model.TEXT_ENCODER.NUM_LAYERS, text_encoder_vocab_size=model.TEXT_ENCODER.VOCAB_SIZE, text_encoder_proj_layers=model.TEXT_ENCODER.PROJ_NUM_LAYERS, text_encoder_n_ctx=model.TEXT_ENCODER.N_CTX, conv_dim=model.SEM_SEG_HEAD.CONVS_DIM, mask_dim=model.SEM_SEG_HEAD.MASK_DIM, hidden_dim=model.ONE_FORMER.HIDDEN_DIM, norm=model.SEM_SEG_HEAD.NORM, encoder_layers=model.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS, encoder_feedforward_dim=1024, 
decoder_layers=model.ONE_FORMER.DEC_LAYERS, use_task_norm=model.ONE_FORMER.USE_TASK_NORM, num_attention_heads=model.ONE_FORMER.NHEADS, dropout=model.ONE_FORMER.DROPOUT, dim_feedforward=model.ONE_FORMER.DIM_FEEDFORWARD, pre_norm=model.ONE_FORMER.PRE_NORM, enforce_input_proj=model.ONE_FORMER.ENFORCE_INPUT_PROJ, query_dec_layers=model.ONE_FORMER.CLASS_DEC_LAYERS, common_stride=model.SEM_SEG_HEAD.COMMON_STRIDE, id2label=id2label, label2id=label2id, ) return config class OriginalOneFormerConfigToProcessorConverter: def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor: model = original_config.MODEL model_input = original_config.INPUT dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0]) if "ade20k" in model_repo: class_info_file = "ade20k_panoptic.json" elif "coco" in model_repo: class_info_file = "coco_panoptic.json" elif "cityscapes" in model_repo: class_info_file = "cityscapes_panoptic.json" else: raise ValueError("Invalid Dataset!") image_processor = OneFormerImageProcessor( image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(), image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(), size=model_input.MIN_SIZE_TEST, max_size=model_input.MAX_SIZE_TEST, num_labels=model.SEM_SEG_HEAD.NUM_CLASSES, ignore_index=dataset_catalog.ignore_label, class_info_file=class_info_file, ) tokenizer = CLIPTokenizer.from_pretrained(model_repo) return OneFormerProcessor( image_processor=image_processor, tokenizer=tokenizer, task_seq_length=original_config.INPUT.TASK_SEQ_LEN, max_seq_length=original_config.INPUT.MAX_SEQ_LEN, ) class OriginalOneFormerCheckpointToOursConverter: def __init__(self, original_model: nn.Module, config: OneFormerConfig): self.original_model = original_model self.config = config def pop_all(self, renamed_keys: list[tuple[str, str]], dst_state_dict: StateDict, src_state_dict: StateDict): for src_key, dst_key in renamed_keys: dst_state_dict[dst_key] = src_state_dict.pop(src_key) # Swin Backbone def replace_swin_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" renamed_keys = [ ( f"{src_prefix}.patch_embed.proj.weight", f"{dst_prefix}.embeddings.patch_embeddings.projection.weight", ), (f"{src_prefix}.patch_embed.proj.bias", f"{dst_prefix}.embeddings.patch_embeddings.projection.bias"), (f"{src_prefix}.patch_embed.norm.weight", f"{dst_prefix}.embeddings.norm.weight"), (f"{src_prefix}.patch_embed.norm.bias", f"{dst_prefix}.embeddings.norm.bias"), ] num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( [ # src, dst ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_before.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_bias_table", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_bias_table", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = 
src_state_dict[f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.proj.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.output.dense.bias", ), ] ) # second norm renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.norm2.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.layernorm_after.bias", ), ] ) # mlp renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc1.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.intermediate.dense.bias", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.weight", ), ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.mlp.fc2.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.output.dense.bias", ), ] ) renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.blocks.{block_idx}.attn.relative_position_index", f"{dst_prefix}.encoder.layers.{layer_idx}.blocks.{block_idx}.attention.self.relative_position_index", ) ] ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.layers.{layer_idx}.downsample.reduction.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.layers.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.encoder.layers.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, 
src_state_dict) # Dinat Backbone def replace_dinat_backbone(self, dst_state_dict: StateDict, src_state_dict: StateDict, config: OneFormerConfig): dst_prefix: str = "pixel_level_module.encoder" src_prefix: str = "backbone" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = rename_keys_for_weight_bias(f"{src_prefix}.patch_embed.norm", f"{dst_prefix}.embeddings.norm") for i in range(2): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.patch_embed.proj.{i}", f"{dst_prefix}.embeddings.patch_embeddings.projection.{i}", ) ) num_layers = len(config.backbone_config.depths) for layer_idx in range(num_layers): for block_idx in range(config.backbone_config.depths[layer_idx]): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm1", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_before", ) ) renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.norm2", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.layernorm_after", ) ) renamed_keys.extend( [ # src, dst ( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.rpb", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.rpb", ), ] ) # now we need to handle the attentions # read in weights + bias of input projection layer of cross-attention src_att_weight = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight"] src_att_bias = src_state_dict[f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias"] size = src_att_weight.shape[0] offset = size // 3 dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.weight" ] = src_att_weight[:offset, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.query.bias" ] = src_att_bias[:offset] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.weight" ] = src_att_weight[offset : offset * 2, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.key.bias" ] = src_att_bias[offset : offset * 2] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.weight" ] = src_att_weight[-offset:, :] dst_state_dict[ f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.self.value.bias" ] = src_att_bias[-offset:] # let's pop them src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.weight") src_state_dict.pop(f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.qkv.bias") # proj renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.attn.proj", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.attention.output.dense", ) ) # mlp renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc1", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.intermediate.dense", ) ) renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.levels.{layer_idx}.blocks.{block_idx}.mlp.fc2", f"{dst_prefix}.encoder.levels.{layer_idx}.layers.{block_idx}.output.dense", ) ) if layer_idx < num_layers - 1: # patch merging renamed_keys.extend( [ ( f"{src_prefix}.levels.{layer_idx}.downsample.reduction.weight", 
f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.reduction.weight", ), ( f"{src_prefix}.levels.{layer_idx}.downsample.norm.weight", f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.weight", ), ( f"{src_prefix}.levels.{layer_idx}.downsample.norm.bias", f"{dst_prefix}.encoder.levels.{layer_idx}.downsample.norm.bias", ), ] ) # hidden states norms renamed_keys.extend( [ ( f"{src_prefix}.norm{layer_idx}.weight", f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.weight", ), ( f"{src_prefix}.norm{layer_idx}.bias", f"{dst_prefix}.hidden_states_norms.stage{layer_idx + 1}.bias", ), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Backbone + Pixel Decoder def replace_pixel_module(self, dst_state_dict: StateDict, src_state_dict: StateDict, is_swin: bool): dst_prefix: str = "pixel_level_module.decoder" src_prefix: str = "sem_seg_head.pixel_decoder" if is_swin: self.replace_swin_backbone(dst_state_dict, src_state_dict, self.config) else: self.replace_dinat_backbone(dst_state_dict, src_state_dict, self.config) def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): self_attn_keys = [] self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.attention_weights", f"{dst_prefix}.attention_weights") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.output_proj", f"{dst_prefix}.output_proj") ) self_attn_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.sampling_offsets", f"{dst_prefix}.sampling_offsets") ) self_attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.value_proj", f"{dst_prefix}.value_proj")) return self_attn_keys def rename_keys_for_encoder_layer(src_prefix: str, dst_prefix: str): encoder_keys = [] encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.fc1")) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.fc2")) encoder_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.self_attn_layer_norm") ) encoder_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.final_layer_norm")) encoder_keys.extend(rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn")) return encoder_keys # convolution layer for final features renamed_keys = [ (f"{src_prefix}.adapter_1.weight", f"{dst_prefix}.adapter_1.0.weight"), (f"{src_prefix}.adapter_1.norm.weight", f"{dst_prefix}.adapter_1.1.weight"), (f"{src_prefix}.adapter_1.norm.bias", f"{dst_prefix}.adapter_1.1.bias"), ] renamed_keys.extend( [ (f"{src_prefix}.layer_1.weight", f"{dst_prefix}.layer_1.0.weight"), (f"{src_prefix}.layer_1.norm.weight", f"{dst_prefix}.layer_1.1.weight"), (f"{src_prefix}.layer_1.norm.bias", f"{dst_prefix}.layer_1.1.bias"), ] ) # proj layers for i in range(3): for j in range(2): renamed_keys.extend( [ (f"{src_prefix}.input_proj.{i}.{j}.weight", f"{dst_prefix}.input_projections.{i}.{j}.weight"), (f"{src_prefix}.input_proj.{i}.{j}.bias", f"{dst_prefix}.input_projections.{i}.{j}.bias"), ] ) renamed_keys.extend([(f"{src_prefix}.transformer.level_embed", f"{dst_prefix}.level_embed")]) # layers for layer_idx in range(self.config.encoder_layers): renamed_keys.extend( rename_keys_for_encoder_layer( f"{src_prefix}.transformer.encoder.layers.{layer_idx}", f"{dst_prefix}.encoder.layers.{layer_idx}" ) ) # proj renamed_keys.extend( [ 
(f"{src_prefix}.mask_features.weight", f"{dst_prefix}.mask_projection.weight"), (f"{src_prefix}.mask_features.bias", f"{dst_prefix}.mask_projection.bias"), ] ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) # Transformer Decoder def replace_keys_qkv_transformer_decoder(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module.decoder.layers" src_prefix: str = "sem_seg_head.predictor" for i in range(self.config.decoder_layers - 1): # read in weights + bias of input projection layer of self-attention in_proj_weight = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_weight" ) in_proj_bias = src_state_dict.pop( f"{src_prefix}.transformer_self_attention_layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.weight"] = in_proj_weight[:256, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.q_proj.bias"] = in_proj_bias[:256] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.k_proj.bias"] = in_proj_bias[256:512] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] dst_state_dict[f"{dst_prefix}.{i}.self_attn.self_attn.v_proj.bias"] = in_proj_bias[-256:] def replace_transformer_module(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "transformer_module" src_prefix: str = "sem_seg_head.predictor" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_attn(src_prefix: str, dst_prefix: str): attn_keys = [ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), ] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_self_attn(src_prefix: str, dst_prefix: str): attn_keys = [] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_query_transformer_layer(src_prefix: str, dst_prefix: str): query_transformer_layer_keys = [] query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm1", f"{dst_prefix}.norm1") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm2", f"{dst_prefix}.norm2") ) query_transformer_layer_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.norm3", f"{dst_prefix}.norm3") ) query_transformer_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") ) query_transformer_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.multihead_attn", f"{dst_prefix}.multihead_attn") ) return query_transformer_layer_keys def rename_keys_for_cross_attn_layer(src_prefix: str, dst_prefix: str): cross_attn_layer_keys = [] cross_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) cross_attn_layer_keys.extend( rename_keys_for_attn(f"{src_prefix}.multihead_attn", 
f"{dst_prefix}.multihead_attn") ) return cross_attn_layer_keys def rename_keys_for_self_attn_layer(src_prefix: str, dst_prefix: str): self_attn_layer_keys = [] self_attn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) self_attn_layer_keys.extend( rename_keys_for_self_attn(f"{src_prefix}.self_attn", f"{dst_prefix}.self_attn") ) return self_attn_layer_keys def rename_keys_for_ffn_layer(src_prefix: str, dst_prefix: str): ffn_layer_keys = [] ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear1", f"{dst_prefix}.linear1")) ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.linear2", f"{dst_prefix}.linear2")) ffn_layer_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.norm", f"{dst_prefix}.norm")) return ffn_layer_keys def rename_keys_for_transformer_decoder_layer(src_prefix: str, dst_prefix: str, idx: int): transformer_decoder_layer_keys = [] transformer_decoder_layer_keys.extend( rename_keys_for_cross_attn_layer( f"{src_prefix}.transformer_cross_attention_layers.{idx}", f"{dst_prefix}.{idx}.cross_attn" ) ) transformer_decoder_layer_keys.extend( rename_keys_for_self_attn_layer( f"{src_prefix}.transformer_self_attention_layers.{idx}", f"{dst_prefix}.{idx}.self_attn" ) ) transformer_decoder_layer_keys.extend( rename_keys_for_ffn_layer(f"{src_prefix}.transformer_ffn_layers.{idx}", f"{dst_prefix}.{idx}.ffn") ) return transformer_decoder_layer_keys # positional embedding for object queries renamed_keys = [ (f"{src_prefix}.query_embed.weight", f"{dst_prefix}.queries_embedder.weight"), (f"{src_prefix}.level_embed.weight", f"{dst_prefix}.level_embed.weight"), ] # norm renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.decoder_norm", f"{dst_prefix}.decoder.decoder_norm") ) # proj renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.class_input_proj", f"{dst_prefix}.decoder.query_input_projection" ) ) renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.class_embed", f"{dst_prefix}.decoder.class_embed") ) for i in range(3): renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.mask_embed.layers.{i}", f"{dst_prefix}.decoder.mask_embed.layers.{i}.0" ) ) # norm renamed_keys.extend( rename_keys_for_weight_bias( f"{src_prefix}.class_transformer.decoder.norm", f"{dst_prefix}.decoder.query_transformer.decoder.norm" ) ) # transformer to update queries with task tokens for i in range(self.config.query_dec_layers): renamed_keys.extend( rename_keys_for_query_transformer_layer( f"{src_prefix}.class_transformer.decoder.layers.{i}", f"{dst_prefix}.decoder.query_transformer.decoder.layers.{i}", ) ) # decoder layers for i in range(self.config.decoder_layers - 1): renamed_keys.extend( rename_keys_for_transformer_decoder_layer( f"{src_prefix}", f"{dst_prefix}.decoder.layers", i, ) ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) self.replace_keys_qkv_transformer_decoder(dst_state_dict, src_state_dict) def replace_task_mlp(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "task_encoder" src_prefix: str = "task_mlp" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = [] for i in range(2): renamed_keys.extend( rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.task_mlp.layers.{i}.0") ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_text_projector(self, dst_state_dict: StateDict, 
src_state_dict: StateDict): dst_prefix: str = "text_mapper.text_projector" src_prefix: str = "text_projector" def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] renamed_keys = [] for i in range(self.config.text_encoder_config["text_encoder_proj_layers"]): renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.layers.{i}", f"{dst_prefix}.{i}.0")) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def replace_text_mapper(self, dst_state_dict: StateDict, src_state_dict: StateDict): dst_prefix: str = "text_mapper.text_encoder" src_prefix: str = "text_encoder" self.replace_text_projector(dst_state_dict, src_state_dict) def rename_keys_for_weight_bias(src_prefix: str, dst_prefix: str): return [ (f"{src_prefix}.weight", f"{dst_prefix}.weight"), (f"{src_prefix}.bias", f"{dst_prefix}.bias"), ] def rename_keys_for_attn(src_prefix: str, dst_prefix: str): attn_keys = [ (f"{src_prefix}.in_proj_bias", f"{dst_prefix}.in_proj_bias"), (f"{src_prefix}.in_proj_weight", f"{dst_prefix}.in_proj_weight"), ] attn_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.out_proj", f"{dst_prefix}.out_proj")) return attn_keys def rename_keys_for_layer(src_prefix: str, dst_prefix: str): resblock_keys = [] resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_fc", f"{dst_prefix}.mlp.fc1")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.mlp.c_proj", f"{dst_prefix}.mlp.fc2")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_1", f"{dst_prefix}.layer_norm1")) resblock_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_2", f"{dst_prefix}.layer_norm2")) resblock_keys.extend(rename_keys_for_attn(f"{src_prefix}.attn", f"{dst_prefix}.self_attn")) return resblock_keys renamed_keys = [ ("prompt_ctx.weight", "text_mapper.prompt_ctx.weight"), ] renamed_keys.extend( [ (f"{src_prefix}.positional_embedding", f"{dst_prefix}.positional_embedding"), (f"{src_prefix}.token_embedding.weight", f"{dst_prefix}.token_embedding.weight"), ] ) renamed_keys.extend(rename_keys_for_weight_bias(f"{src_prefix}.ln_final", f"{dst_prefix}.ln_final")) for i in range(self.config.text_encoder_config["text_encoder_num_layers"]): renamed_keys.extend( rename_keys_for_layer( f"{src_prefix}.transformer.resblocks.{i}", f"{dst_prefix}.transformer.layers.{i}" ) ) self.pop_all(renamed_keys, dst_state_dict, src_state_dict) def convert(self, oneformer: OneFormerModel, is_swin: bool) -> OneFormerModel: dst_state_dict = TrackedStateDict(oneformer.state_dict()) src_state_dict = self.original_model.state_dict() self.replace_pixel_module(dst_state_dict, src_state_dict, is_swin) self.replace_transformer_module(dst_state_dict, src_state_dict) self.replace_task_mlp(dst_state_dict, src_state_dict) if self.config.is_training: self.replace_text_mapper(dst_state_dict, src_state_dict) logger.info(f"Missed keys are {pformat(dst_state_dict.diff())}") logger.info(f"Not copied keys are {pformat(src_state_dict.keys())}") logger.info("🙌 Done") oneformer.load_state_dict(dst_state_dict) return oneformer @staticmethod def using_dirs(checkpoints_dir: Path, config_dir: Path) -> Iterator[tuple[object, Path, Path]]: checkpoints: list[Path] = checkpoints_dir.glob("**/*.pth") for checkpoint in checkpoints: logger.info(f"💪 Converting {checkpoint.stem}") # find associated config file config: Path = config_dir / f"{checkpoint.stem}.yaml" yield config, checkpoint def post_process_sem_seg_output(outputs: 
OneFormerForUniversalSegmentationOutput, target_size: tuple[int, int]): # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=target_size, mode="bilinear", align_corners=False, ) # remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_probs = masks_queries_logits.sigmoid() # now we want to sum over the queries, # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ # where $ softmax(p) \in R^{q, c} $ is the mask classes # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) return segmentation def test( original_model, our_model: OneFormerForUniversalSegmentation, processor: OneFormerProcessor, model_repo: str, ): def _preprocess_text(text_list=None, max_length=77): if text_list is None: raise ValueError("tokens cannot be None.") tokens = tokenizer(text_list, padding="max_length", max_length=max_length, truncation=True) attention_masks, input_ids = tokens["attention_mask"], tokens["input_ids"] token_inputs = [] for attn_mask, input_id in zip(attention_masks, input_ids): token = torch.tensor(attn_mask) * torch.tensor(input_id) token_inputs.append(token.unsqueeze(0)) token_inputs = torch.cat(token_inputs, dim=0) return token_inputs with torch.no_grad(): tokenizer = CLIPTokenizer.from_pretrained(model_repo) original_model = original_model.eval() our_model = our_model.eval() im = prepare_img() tr = T.Compose( [ T.Resize((640, 640)), T.ToTensor(), T.Normalize( mean=torch.tensor([123.675, 116.280, 103.530]) / 255.0, std=torch.tensor([58.395, 57.120, 57.375]) / 255.0, ), ], ) x = tr(im).unsqueeze(0) task_input = ["the task is semantic"] task_token = _preprocess_text(task_input, max_length=processor.task_seq_length) original_model_backbone_features = original_model.backbone(x.clone()) our_model_output: OneFormerModelOutput = our_model.model(x.clone(), task_token, output_hidden_states=True) for original_model_feature, our_model_feature in zip( original_model_backbone_features.values(), our_model_output.encoder_hidden_states ): assert torch.allclose(original_model_feature, our_model_feature, atol=3e-3), ( "The backbone features are not the same." 
) mask_features, _, multi_scale_features, _, _ = original_model.sem_seg_head.pixel_decoder.forward_features( original_model_backbone_features ) original_pixel_decoder_features = [] original_pixel_decoder_features.append(mask_features) for i in range(len(multi_scale_features)): original_pixel_decoder_features.append(multi_scale_features[i]) for original_model_feature, our_model_feature in zip( original_pixel_decoder_features, our_model_output.pixel_decoder_hidden_states ): assert torch.allclose(original_model_feature, our_model_feature, atol=3e-4), ( "The pixel decoder feature are not the same" ) tr_complete = T.Compose( [ T.Resize((640, 640)), T.ToTensor(), ], ) y = (tr_complete(im) * 255.0).to(torch.int).float() # let's test the full model original_model_out = original_model([{"image": y.clone(), "task": "The task is semantic"}]) original_segmentation = original_model_out[0]["sem_seg"] our_model_out: OneFormerForUniversalSegmentationOutput = our_model( x.clone(), task_token, output_hidden_states=True ) our_segmentation = post_process_sem_seg_output(our_model_out, target_size=(640, 640))[0] assert torch.allclose(original_segmentation, our_segmentation, atol=1e-3), ( "The segmentation image is not the same." ) logger.info("✅ Test passed!") def get_name(checkpoint_file: Path): model_name_raw: str = checkpoint_file.stem backbone = "swin" if "swin" in model_name_raw else "dinat" dataset = "" if "coco" in model_name_raw: dataset = "coco" elif "ade20k" in model_name_raw: dataset = "ade20k" elif "cityscapes" in model_name_raw: dataset = "cityscapes" else: raise ValueError( f"{model_name_raw} must be wrong since we didn't find 'coco' or 'ade20k' or 'cityscapes' in it " ) backbone_types = ["tiny", "large"] backbone_type = list(filter(lambda x: x in model_name_raw, backbone_types))[0] model_name = f"oneformer_{dataset}_{backbone}_{backbone_type}" return model_name if __name__ == "__main__": parser = ArgumentParser( description=( "Command line to convert the original oneformer models (with swin backbone) to transformers" " implementation." ) ) parser.add_argument( "--checkpoints_dir", type=Path, help=( "A directory containing the model's checkpoints. The directory has to have the following structure:" " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.pth; where <CONFIG_NAME> name must follow the" " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>" ), ) parser.add_argument( "--configs_dir", type=Path, help=( "A directory containing the model's configs, see detectron2 doc. The directory has to have the following" " structure: <DIR_NAME>/<DATASET_NAME>/<CONFIG_NAME>.yaml; where <CONFIG_NAME> name must follow the" " following nomenclature nomenclature: oneformer_<DATASET_NAME>_<BACKBONE>_<BACKBONE_TYPE>" ), ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=Path, help="Path to the folder to output PyTorch models.", ) parser.add_argument( "--oneformer_dir", required=True, type=Path, help=( "A path to OneFormer's original implementation directory. 
You can download from here: " "https://github.com/SHI-Labs/OneFormer" ), ) args = parser.parse_args() checkpoints_dir: Path = args.checkpoints_dir config_dir: Path = args.configs_dir save_directory: Path = args.pytorch_dump_folder_path oneformer_dir: Path = args.oneformer_dir # append the path to the parents to oneformer dir sys.path.append(str(oneformer_dir.parent)) # and import what's needed from OneFormer.oneformer import add_common_config, add_dinat_config, add_oneformer_config, add_swin_config from OneFormer.oneformer.oneformer_model import OneFormer as OriginalOneFormer if not save_directory.exists(): save_directory.mkdir(parents=True) for config_file, checkpoint_file in OriginalOneFormerCheckpointToOursConverter.using_dirs( checkpoints_dir, config_dir ): processor = OriginalOneFormerConfigToProcessorConverter()( setup_cfg(Args(config_file=config_file)), os.path.join("shi-labs", config_file.stem) ) original_config = setup_cfg(Args(config_file=config_file)) oneformer_kwargs = OriginalOneFormer.from_config(original_config) original_model = OriginalOneFormer(**oneformer_kwargs).eval() DetectionCheckpointer(original_model).load(str(checkpoint_file)) is_swin = "swin" in config_file.stem config: OneFormerConfig = OriginalOneFormerConfigToOursConverter()(original_config, is_swin) oneformer = OneFormerModel(config=config).eval() converter = OriginalOneFormerCheckpointToOursConverter(original_model, config) oneformer = converter.convert(oneformer, is_swin) oneformer_for_universal_segmentation = OneFormerForUniversalSegmentation(config=config).eval() oneformer_for_universal_segmentation.model = oneformer test( original_model, oneformer_for_universal_segmentation, processor, os.path.join("shi-labs", config_file.stem), ) model_name = get_name(checkpoint_file) logger.info(f"🪄 Saving {model_name}") processor.save_pretrained(save_directory / model_name) oneformer_for_universal_segmentation.save_pretrained(save_directory / model_name) processor.push_to_hub( repo_id=os.path.join("shi-labs", config_file.stem), commit_message="Add configs", use_temp_dir=True, ) oneformer_for_universal_segmentation.push_to_hub( repo_id=os.path.join("shi-labs", config_file.stem), commit_message="Add model", use_temp_dir=True, )
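# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original conversion script above): the
# converter repeatedly splits a fused `qkv`/`in_proj` tensor into separate
# query/key/value projections by slicing thirds along dim 0. The helper below
# is a hypothetical, minimal reproduction of that pattern for a single fused
# weight/bias pair; the function and variable names are ours, not the script's.
import torch


def split_fused_qkv(weight: torch.Tensor, bias: torch.Tensor):
    """Split a fused (3 * dim, in_dim) weight and (3 * dim,) bias into q/k/v parts."""
    offset = weight.shape[0] // 3
    # Same slicing the converter uses: first third -> query, middle third -> key,
    # last third -> value.
    q_w, k_w, v_w = weight[:offset], weight[offset : offset * 2], weight[-offset:]
    q_b, k_b, v_b = bias[:offset], bias[offset : offset * 2], bias[-offset:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


# Quick sanity check: concatenating the split parts reproduces the fused tensors.
_w, _b = torch.randn(3 * 256, 256), torch.randn(3 * 256)
(_qw, _qb), (_kw, _kb), (_vw, _vb) = split_fused_qkv(_w, _b)
assert torch.equal(torch.cat([_qw, _kw, _vw], dim=0), _w)
assert torch.equal(torch.cat([_qb, _kb, _vb], dim=0), _b)
# ---------------------------------------------------------------------------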
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OPT model.""" from typing import Callable, Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_opt import OPTConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask logger = logging.get_logger(__name__) class OPTLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. 
Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward( self, attention_mask: torch.LongTensor, past_key_values_length: int = 0, position_ids: Optional[torch.LongTensor] = None, ): """`input_ids_shape` is expected to be [bsz x seqlen].""" if position_ids is None: position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() # cut positions if `past_key_values_length` is > 0 position_ids = position_ids[:, past_key_values_length:] return super().forward(position_ids + self.offset) # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config: OPTConfig, layer_idx: Optional[int] = None, **kwargs, ): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.dropout = config.attention_dropout self.enable_bias = config.enable_bias self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.head_dim = self.embed_dim // self.num_heads self.is_causal = True if (self.head_dim * self.num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, past_key_values: Optional[tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() # Scaling is susceptible to floating point arithmetics' inprecisions # which can lead to different results (this is dependent from model # to model, e.g. whisper is one such case). We therefore keep the # original order of scaling to follow the original implementation # and enforce no scaling (1.0) in the attention call below. 
query_states = self.q_proj(hidden_states) * self.scaling query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation key_states, value_states = past_key_values.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": if self.config._attn_implementation == "sdpa" and output_attentions: logger.warning_once( "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to " 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) else: attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=1.0, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class OPTDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OPTConfig, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = OPTAttention(config=config, layer_idx=layer_idx) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm( self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine ) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias) self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.. """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, position_ids=position_ids, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class OPTPreTrainedModel(PreTrainedModel): config: OPTConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["OPTDecoderLayer"] _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() class OPTDecoder(OPTPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`OPTDecoderLayer`] Args: config: OPTConfig """ def __init__(self, config: OPTConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx) self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) if config.word_embed_proj_dim != config.hidden_size: self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm( config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine ) else: self.final_layer_norm = None self.layers = nn.ModuleList([OPTDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). 
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask @can_return_tuple def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. for padding use -1. [What are position IDs?](../glossary#position-ids) cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if input_ids is not None: input_ids = input_ids.view(-1, input_ids.shape[-1]) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache() past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if attention_mask is None: seq_length = past_seen_tokens + inputs_embeds.shape[1] attention_mask = torch.ones(inputs_embeds.shape[0], seq_length, device=inputs_embeds.device) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) # embed positions if position_ids is None: # position_ids = cache_position.unsqueeze(0) position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() # cut positions if `past_seen_tokens` is > 0 position_ids = position_ids[:, past_seen_tokens:] pos_embeds = self.embed_positions(attention_mask, past_seen_tokens, position_ids=position_ids) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds.to(inputs_embeds.device) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @auto_docstring class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.decoder = OPTDecoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_decoder(self): return self.decoder @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: 
Optional[Union[list[torch.FloatTensor], Cache]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = OPTModel(config) # the lm_head weight is automatically tied to the embed tokens weight self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, OPTForCausalLM >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> prompt = "Hey, are you conscious? Can you talk to me?" 
>>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) logits = self.lm_head(outputs[0]).contiguous() loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" The OPT Model transformer with a sequence classification head on top (linear layer). [`OPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """ ) class OPTForSequenceClassification(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.num_labels = config.num_labels self.model = OPTModel(config) self.score = nn.Linear(config.word_embed_proj_dim, self.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, ) -> Union[tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value @auto_docstring class OPTForQuestionAnswering(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.model = OPTModel(config) self.qa_outputs = nn.Linear(config.word_embed_proj_dim, 2) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, use_cache: 
Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, ) -> Union[tuple, QuestionAnsweringModelOutput]: r""" Example: ```python >>> from transformers import AutoTokenizer, OPTForQuestionAnswering >>> import torch >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> # note: we are loading a OPTForQuestionAnswering from the hub here, >>> # so the head will be randomly initialized, hence the predictions will be random >>> model = OPTForQuestionAnswering.from_pretrained("facebook/opt-350m") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> answer_offset = len(tokenizer(question)[0]) >>> predict_answer_tokens = inputs.input_ids[ ... 0, answer_offset + answer_start_index : answer_offset + answer_end_index + 1 ... ] >>> predicted = tokenizer.decode(predict_answer_tokens) >>> predicted ' a nice puppet' ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index).to(logits.device) end_positions = end_positions.clamp(0, ignored_index).to(logits.device) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + transformer_outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value __all__ = [ "OPTForCausalLM", "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", "OPTForQuestionAnswering", ]
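# Illustration (comments only, not executed by the classes above): how `OPTForSequenceClassification`
# locates the token to pool when a `pad_token_id` is defined. The rightmost non-pad position is
#     (torch.arange(seq_len) * (input_ids != pad_token_id).int()).argmax(-1)
# e.g. with pad_token_id=1, input_ids=[[1, 1, 5, 6, 7]] (left padding) gives index 4, and
#      input_ids=[[5, 6, 7, 1, 1]] (right padding) gives index 2.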
transformers/src/transformers/models/opt/modeling_opt.py/0
{ "file_path": "transformers/src/transformers/models/opt/modeling_opt.py", "repo_id": "transformers", "token_count": 21142 }
519
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Image processor class for OWLv2.""" import warnings from typing import Optional, Union from transformers.models.owlvit.image_processing_owlvit_fast import OwlViTImageProcessorFast from ...image_processing_utils_fast import ( BatchFeature, DefaultFastImageProcessorKwargs, ) from ...image_transforms import group_images_by_shape, reorder_images from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, SizeDict, ) from ...processing_utils import Unpack from ...utils import ( TensorType, auto_docstring, is_torch_available, is_torchvision_available, is_torchvision_v2_available, ) if is_torch_available(): import torch if is_torchvision_v2_available(): from torchvision.transforms.v2 import functional as F elif is_torchvision_available(): from torchvision.transforms import functional as F class Owlv2FastImageProcessorKwargs(DefaultFastImageProcessorKwargs): r""" do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess` method. If `True`, padding will be applied to the bottom and right of the image with grey pixels. """ do_pad: Optional[bool] @auto_docstring class Owlv2ImageProcessorFast(OwlViTImageProcessorFast): resample = PILImageResampling.BILINEAR image_mean = OPENAI_CLIP_MEAN image_std = OPENAI_CLIP_STD size = {"height": 960, "width": 960} rescale_factor = 1 / 255 do_resize = True do_rescale = True do_normalize = True do_pad = True valid_kwargs = Owlv2FastImageProcessorKwargs crop_size = None do_center_crop = None def __init__(self, **kwargs: Unpack[Owlv2FastImageProcessorKwargs]): OwlViTImageProcessorFast().__init__(**kwargs) @auto_docstring def preprocess(self, images: ImageInput, **kwargs: Unpack[Owlv2FastImageProcessorKwargs]): return OwlViTImageProcessorFast().preprocess(images, **kwargs) def _pad_images(self, images: "torch.Tensor", constant_value: float = 0.5) -> "torch.Tensor": """ Pad an image with zeros to the given size. 
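Concretely, padding is added to the bottom and right until height and width both equal
`max(height, width)`, with `constant_value` (0.5, i.e. grey for images already rescaled to
[0, 1]) as the fill. A minimal sketch of the resulting shape, using torchvision directly on an
assumed batch of landscape images:

```python
>>> import torch
>>> from torchvision.transforms.v2 import functional as F
>>> images = torch.zeros(2, 3, 480, 640)
>>> F.pad(images, [0, 0, 0, 640 - 480], fill=0.5).shape  # pad the bottom to a 640x640 square
torch.Size([2, 3, 640, 640])
```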
""" height, width = images.shape[-2:] size = max(height, width) pad_bottom = size - height pad_right = size - width padding = (0, 0, pad_right, pad_bottom) padded_image = F.pad(images, padding, fill=constant_value) return padded_image def pad( self, images: list["torch.Tensor"], disable_grouping: Optional[bool], constant_value: float = 0.5, ) -> list["torch.Tensor"]: grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): stacked_images = self._pad_images( stacked_images, constant_value=constant_value, ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) return processed_images def resize( self, image: "torch.Tensor", size: SizeDict, anti_aliasing: bool = True, anti_aliasing_sigma=None, **kwargs, ) -> "torch.Tensor": """ Resize an image as per the original implementation. Args: image (`Tensor`): Image to resize. size (`dict[str, int]`): Dictionary containing the height and width to resize the image to. anti_aliasing (`bool`, *optional*, defaults to `True`): Whether to apply anti-aliasing when downsampling the image. anti_aliasing_sigma (`float`, *optional*, defaults to `None`): Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated automatically. """ output_shape = (size.height, size.width) input_shape = image.shape # select height and width from input tensor factors = torch.tensor(input_shape[2:]).to(image.device) / torch.tensor(output_shape).to(image.device) if anti_aliasing: if anti_aliasing_sigma is None: anti_aliasing_sigma = ((factors - 1) / 2).clamp(min=0) else: anti_aliasing_sigma = torch.atleast_1d(anti_aliasing_sigma) * torch.ones_like(factors) if torch.any(anti_aliasing_sigma < 0): raise ValueError("Anti-aliasing standard deviation must be greater than or equal to zero") elif torch.any((anti_aliasing_sigma > 0) & (factors <= 1)): warnings.warn( "Anti-aliasing standard deviation greater than zero but not down-sampling along all axes" ) if torch.any(anti_aliasing_sigma == 0): filtered = image else: kernel_sizes = 2 * torch.ceil(3 * anti_aliasing_sigma).int() + 1 filtered = F.gaussian_blur( image, (kernel_sizes[0], kernel_sizes[1]), sigma=anti_aliasing_sigma.tolist() ) else: filtered = image out = F.resize(filtered, size=(size.height, size.width), antialias=False) return out def _preprocess( self, images: list["torch.Tensor"], do_resize: bool, size: SizeDict, interpolation: Optional["F.InterpolationMode"], do_pad: bool, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Optional[Union[float, list[float]]], image_std: Optional[Union[float, list[float]]], disable_grouping: Optional[bool], return_tensors: Optional[Union[str, TensorType]], **kwargs, ) -> BatchFeature: # Group images by size for batched resizing grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Rescale images before other operations as done in original implementation stacked_images = self.rescale_and_normalize( stacked_images, do_rescale, rescale_factor, False, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) if do_pad: processed_images = self.pad(processed_images, disable_grouping=disable_grouping) 
grouped_images, grouped_images_index = group_images_by_shape( processed_images, disable_grouping=disable_grouping ) resized_images_grouped = {} for shape, stacked_images in grouped_images.items(): if do_resize: resized_stack = self.resize( image=stacked_images, size=size, interpolation=interpolation, input_data_format=ChannelDimension.FIRST, ) resized_images_grouped[shape] = resized_stack resized_images = reorder_images(resized_images_grouped, grouped_images_index) # Group images by size for further processing # Needed in case do_resize is False, or resize returns images with different sizes grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping) processed_images_grouped = {} for shape, stacked_images in grouped_images.items(): # Fused rescale and normalize stacked_images = self.rescale_and_normalize( stacked_images, False, rescale_factor, do_normalize, image_mean, image_std ) processed_images_grouped[shape] = stacked_images processed_images = reorder_images(processed_images_grouped, grouped_images_index) processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors) __all__ = ["Owlv2ImageProcessorFast"]
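# Usage sketch (comments only; assumes the public "google/owlv2-base-patch16-ensemble" checkpoint
# and an installed torchvision backend). Note the processing order implemented above: rescale
# first, then pad to a square with grey pixels, then resize with the original anti-aliasing
# scheme, and normalize last.
#
#   from transformers import AutoImageProcessor
#   from PIL import Image
#   import requests
#
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   processor = AutoImageProcessor.from_pretrained("google/owlv2-base-patch16-ensemble", use_fast=True)
#   inputs = processor(images=image, return_tensors="pt")
#   inputs["pixel_values"].shape  # torch.Size([1, 3, 960, 960])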
transformers/src/transformers/models/owlv2/modular_owlv2.py/0
{ "file_path": "transformers/src/transformers/models/owlv2/modular_owlv2.py", "repo_id": "transformers", "token_count": 3774 }
520
# coding=utf-8 # Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PEGASUS-X model.""" import math from dataclasses import dataclass from typing import Callable, Optional, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, ) from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging from ...utils.deprecation import deprecate_kwarg from .configuration_pegasus_x import PegasusXConfig if is_torch_flex_attn_available(): from ...integrations.flex_attention import BlockMask, make_flex_block_causal_mask logger = logging.get_logger(__name__) @dataclass class DimensionInfo: """Wrapper for dimension info.""" batch_size: int # batch size seq_len: int # token length block_size: int # block size num_heads: int # num heads hidden_dim: int # hidden dim dim_per_head: int # dim per head num_blocks: int # num blocks global_len: int # global length padded_seq_len: int # padded token seq length # Note: Compared to the original Flax implementation, we will pad the token representations to # a multiple of block size at the start of the encoder layers, so T=P always. # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->PegasusX class PegasusXScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. 
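When `config.scale_embedding` is set, the encoder and decoder build this module with
`embed_scale = math.sqrt(config.d_model)`, so token embeddings are scaled before the sinusoidal
position embeddings are added. A minimal sketch of the behaviour (toy sizes, not a released
configuration):

```python
>>> import math, torch
>>> emb = PegasusXScaledWordEmbedding(num_embeddings=10, embedding_dim=4, padding_idx=0, embed_scale=math.sqrt(4))
>>> ids = torch.tensor([[1, 2, 3]])
>>> torch.allclose(emb(ids), emb.weight[ids] * 2.0)
True
```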
""" def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class PegasusXSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, embed_dim, max_scale: int = 10000.0): super().__init__() self.embed_dim = embed_dim self.max_scale = max_scale @torch.no_grad() def forward( self, input_embeds: torch.Tensor, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None ) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" batch_size, seq_len = input_embeds.shape[:2] if position_ids is None: position_ids = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device )[:, None] pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype) half_d_feature = self.embed_dim // 2 div_term = torch.exp( torch.arange(half_d_feature, device=input_embeds.device, dtype=torch.int64).type_as(input_embeds) * -(np.log(float(self.max_scale)) / (half_d_feature - 1)) ) pe[:, :half_d_feature] = torch.sin(position_ids * div_term) pe[:, half_d_feature:] = torch.cos(position_ids * div_term) return pe[None].expand(batch_size, -1, -1) # Copied from transformers.models.bart.modeling_bart.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: Optional[float] = None, dropout: float = 0.0, head_mask: Optional[torch.Tensor] = None, **kwargs, ): if scaling is None: scaling = query.size(-1) ** -0.5 attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if head_mask is not None: attn_weights = attn_weights * head_mask.view(1, -1, 1, 1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PegasusX class PegasusXAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, is_causal: bool = False, config: Optional[PegasusXConfig] = None, layer_idx: Optional[int] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads self.config = config if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.is_causal = is_causal self.layer_idx = layer_idx if layer_idx is None and self.is_decoder: logger.warning_once( f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and " "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." 
) self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, # TODO: we need a refactor so that the different attention modules can get their specific kwargs # ATM, we have mixed things encoder, decoder, and encoder-decoder attn **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None # determine input shapes bsz, tgt_len = hidden_states.shape[:-1] src_len = key_value_states.shape[1] if is_cross_attention else tgt_len q_input_shape = (bsz, tgt_len, -1, self.head_dim) kv_input_shape = (bsz, src_len, -1, self.head_dim) # get query proj query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2) if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(*kv_input_shape).transpose(1, 2) value_states = value_states.view(*kv_input_shape).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs, ) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) return attn_output, attn_weights class PegasusXGlobalLocalAttention(nn.Module): """Global + Local attention. 
For use with Encoder only.""" def __init__( self, embed_dim: int, num_heads: int, block_size: int, dropout: float = 0.0, is_decoder: bool = False, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.block_size = block_size self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, token_hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" dim = DimensionInfo( batch_size=token_hidden_states.shape[0], seq_len=token_hidden_states.shape[1], block_size=self.block_size, num_heads=self.num_heads, hidden_dim=token_hidden_states.shape[2], dim_per_head=self.head_dim, num_blocks=token_hidden_states.shape[1] // self.block_size, global_len=global_hidden_states.shape[1], padded_seq_len=token_hidden_states.shape[1], ) # [batch_size, num_heads, padded_seq_len, dim_per_head] local_q = self._shape( self.q_proj(token_hidden_states) * self.scaling, seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) local_k = self._shape( self.k_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) local_v = self._shape( self.v_proj(token_hidden_states), seq_len=dim.padded_seq_len, bsz=dim.batch_size, ) # [batch_size, num_heads, global_len, dim_per_head] global_q = self._shape( self.q_proj(global_hidden_states) * self.scaling, seq_len=dim.global_len, bsz=dim.batch_size, ) global_k = self._shape( self.k_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size, ) global_v = self._shape( self.v_proj(global_hidden_states), seq_len=dim.global_len, bsz=dim.batch_size, ) global_attn_output, global_attn_probs = self.compute_global_attention_representations( global_q=global_q, global_k=global_k, global_v=global_v, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim, ) local_attn_output, local_attn_probs = self.compute_local_attention_representations( global_k=global_k, global_v=global_v, local_q=local_q, local_k=local_k, local_v=local_v, mask=attention_mask, dim=dim, ) # [batch_size, global_len, hidden_dim] global_attn_output = ( global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim) ) # [batch_size, global_len, hidden_dim] global_attn_output = self.out_proj(global_attn_output) # [batch_size, num_heads, block_size, num_heads, dim_per_head] local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous() # [batch_size, padded_seq_len, hidden_dim] local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim) # [batch_size, padded_seq_len, hidden_dim] local_attn_output = self.out_proj(local_attn_output) if output_attentions: attn_probs = {"global": global_attn_probs, "local": local_attn_probs} else: attn_probs = None return 
local_attn_output, global_attn_output, attn_probs def compute_global_attention_representations( self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo ): """Compute attention representations for global tokens. Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input sequence tokens are arranged in blocks for local attention, we unblock them and compute attention. Args: global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: query vectors from global tokens global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: key vectors from global tokens global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: value vectors from global tokens local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: key vectors from local tokens local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: value vectors from local tokens mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask dim (DimensionInfo): DimensionInfo wrapper for dimensions Returns: output of shape `[batch_sizes, length, features]`. where length will be padded to a multiple of block_size """ # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] global_and_local_k = torch.cat([global_k, local_k], dim=2) # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] global_and_local_v = torch.cat([global_v, local_v], dim=2) # [batch_size, global_len+padded_seq_len] extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0) # [batch_size, num_heads, global_len, global_len+padded_seq_len] attn_weights = torch.einsum("BHGF,BHXF->BHGX", global_q, global_and_local_k) attn_weights = attn_weights + extended_mask[:, None, None, :] attn_probs = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) # [batch_size, num_heads, global_len, F] attn_output = torch.einsum("BHGX,BHXF->BHGF", attn_probs, global_and_local_v) return attn_output, attn_probs def compute_local_attention_representations( self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo ): """Compute attention representations for local tokens. Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence, we need to tile and concatenate the global tokens to every local block Args: global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: key vectors from global tokens global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: value vectors from global tokens local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: query vectors from local tokens local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: key vectors from local tokens local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: value vectors from local tokens mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask dim (DimensionInfo): DimensionInfo wrapper for dimensions Returns: output of shape `[batch_sizes, length, features]`. 
where length will be padded to a multiple of block_size """ # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) # [batch_size, num_blocks, global_len+block_size] extended_mask = nn.functional.pad( mask.view(dim.batch_size, dim.num_blocks, dim.block_size), pad=(dim.global_len, 0), value=0, ) # [batch_size, num_heads, num_blocks, block_size, global_len] blocked_local2global = torch.einsum("BHNKF,BHGF->BHNKG", blocked_local_q, global_k) # [batch_size, num_heads, num_blocks, block_size, block_size] blocked_local2local = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_local_q, blocked_local_k) # [batch_size, num_heads, num_blocks, block_size, global_len+block_size] attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1) attn_weights = attn_weights + extended_mask[:, None, :, None, :] attn_probs = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) # [batch_size, num_heads, num_blocks, block_size, global_len] local2global_attn_probs = attn_probs[:, :, :, :, : dim.global_len] # [batch_size, num_heads, num_blocks, block_size, block_size] local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len :] # [batch_size, num_heads, num_blocks, block_size, dim_per_head] local2global_attn_output = torch.einsum("BHNKG,BHGF->BHNKF", local2global_attn_probs, global_v) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] local2local_attn_output = torch.einsum("BHNKX,BHNXF->BHNKF", local2local_attn_probs, blocked_local_v) # [batch_size, num_heads, num_blocks, block_size, dim_per_head] attn_output = local2global_attn_output + local2local_attn_output return attn_output, attn_probs class PegasusXEncoderLayer(GradientCheckpointingLayer): def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = PegasusXGlobalLocalAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, block_size=config.block_size, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.stagger_blocks_this_layer = stagger_blocks_this_layer self.block_size = config.block_size def forward( self, hidden_states: torch.Tensor, global_hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* global_hidden_states (`torch.FloatTensor`): global token hidden states *(seq_len, num_global_tokens, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding 
elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states global_residual = global_hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states) if self.stagger_blocks_this_layer: # Pad the blocks to simulate staggering hidden_states, attention_mask = self.pad_local_tokens( hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size ) hidden_states, global_hidden_states, attn_weights = self.self_attn( token_hidden_states=hidden_states, global_hidden_states=global_hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) if self.stagger_blocks_this_layer: # Undo the padding hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) global_hidden_states = global_residual + global_hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states global_residual = global_hidden_states global_hidden_states = self.final_layer_norm(global_hidden_states) global_hidden_states = self.activation_fn(self.fc1(global_hidden_states)) global_hidden_states = nn.functional.dropout( global_hidden_states, p=self.activation_dropout, training=self.training ) global_hidden_states = self.fc2(global_hidden_states) global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) global_hidden_states = global_residual + global_hidden_states outputs = (hidden_states, global_hidden_states) if output_attentions: outputs += (attn_weights,) return outputs @classmethod def pad_local_tokens(cls, hidden_states, attention_mask, block_size): # hidden_states: [batch_size, seq_len, hidden_dim] pad_size = block_size // 2 mask_min_value = torch.finfo(hidden_states.dtype).min padded_hidden_states = torch.nn.functional.pad( hidden_states, pad=(0, 0, pad_size, pad_size), ) padded_mask = torch.nn.functional.pad( attention_mask, pad=(pad_size, pad_size), value=mask_min_value, ) return padded_hidden_states, padded_mask @classmethod def unpad_local_tokens(cls, padded_hidden_states, block_size): # padded_hidden_states: [batch_size, padded seq_len, hidden_dim] pad_size = block_size // 2 return padded_hidden_states[:, pad_size:-pad_size, :] class PegasusXDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: PegasusXConfig, layer_idx: Optional[int] = None): super().__init__() self.embed_dim = config.d_model self.self_attn = PegasusXAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = 
config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = PegasusXAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=False, config=config, layer_idx=layer_idx, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* attention_mask (`torch.FloatTensor`): attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape *(seq_len, batch, embed_dim)* encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache: Whether to us KV cache for decoding cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. 
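Example (a minimal shape-check sketch with a hypothetical tiny configuration; no causal or
padding masks are passed, so this only illustrates tensor shapes, not masked decoding):

```python
>>> import torch
>>> from transformers import PegasusXConfig
>>> from transformers.models.pegasus_x.modeling_pegasus_x import PegasusXDecoderLayer

>>> config = PegasusXConfig(d_model=32, decoder_attention_heads=4, decoder_ffn_dim=64)
>>> layer = PegasusXDecoderLayer(config, layer_idx=0)
>>> hidden_states = torch.randn(2, 5, 32)
>>> encoder_hidden_states = torch.randn(2, 7, 32)
>>> layer(hidden_states, encoder_hidden_states=encoder_hidden_states)[0].shape
torch.Size([2, 5, 32])
```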
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class PegasusXPreTrainedModel(PreTrainedModel): config: PegasusXConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = [r"PegasusXEncoderLayer", r"PegasusXDecoderLayer"] _supports_flash_attn = True # Flaky logits _supports_sdpa = False _supports_flex_attn = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_full_mask def _update_full_mask( self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor, ): if attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": attention_mask = attention_mask if 0 in attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & head_mask can not be supported when using SDPA, fall back to # the manual implementation that requires a 4D causal mask in all cases. 
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype) elif self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) return attention_mask # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_causal_mask def _update_causal_mask( self, attention_mask: Optional[Union[torch.Tensor, "BlockMask"]], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, ): if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) # Other attention flavors support in-built causal (when `mask is None`) # while we need to create our specific block mask regardless elif attention_mask is None: attention_mask = make_flex_block_causal_mask( torch.ones( size=(input_tensor.shape[0], input_tensor.shape[1]), device=attention_mask.device, ) ) return attention_mask if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask # Copied from transformers.models.bart.modeling_bart.BartPreTrainedModel._update_cross_attn_mask def _update_cross_attn_mask( self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, ): # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: if self.config._attn_implementation == "flash_attention_2": encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None elif self.config._attn_implementation == "sdpa": # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1], ) elif self.config._attn_implementation == "flex_attention": if isinstance(encoder_attention_mask, torch.Tensor): encoder_attention_mask = make_flex_block_causal_mask( encoder_attention_mask, query_length=input_shape[-1], is_causal=False, ) else: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) return encoder_attention_mask class PegasusXEncoder(PegasusXPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PegasusXEncoderLayer`]. Args: config: PegasusXConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = PegasusXScaledWordEmbedding( config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale ) self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim) self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim) self.layers = nn.ModuleList( [ PegasusXEncoderLayer( stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config ) for i in range(config.encoder_layers) ] ) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") self.config.max_position_embeddings = new_num_position_embeddings self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model) self.embed_positions.to(self.device) def get_position_embeddings(self) -> nn.Embedding: """ Returns the position embeddings matrix """ return self.embed_positions def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(inputs_embeds) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) batch_size, seq_len, _ = hidden_states.shape # Setup mask if attention_mask is None: attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device) attention_mask = attention_mask.to(dtype=hidden_states.dtype) mask_min_value = torch.finfo(hidden_states.dtype).min inverted_mask = 1.0 - attention_mask attention_mask = inverted_mask.masked_fill( inverted_mask.to(torch.bool), mask_min_value, ) # padding to block_size if seq_len % self.config.block_size != 0: pad_len = self.config.block_size - seq_len % self.config.block_size hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0) attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value) # Global tokens global_hidden_states = self.embed_global( torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1) ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < 
self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, global_hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] global_hidden_states = layer_outputs[1] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) # Undo padding-to-block-size hidden_states = hidden_states[:, :seq_len] hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + ((hidden_states, global_hidden_states),) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class PegasusXDecoder(PegasusXPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`] Args: config: PegasusXConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.max_target_positions = config.max_position_embeddings embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 padding_idx = config.pad_token_id if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = PegasusXScaledWordEmbedding( config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale ) self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model) self.layers = nn.ModuleList([PegasusXDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..." ) use_cache = False # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. 
" "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) batch_size, seq_length = inputs_embeds.size()[:-1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) self_attn_cache = ( past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values ) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, self_attn_cache, ) encoder_attention_mask = self._update_cross_attn_mask( encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds, ) # embed positions position_ids = cache_position.unsqueeze(1) position_ids = self.embed_positions(inputs_embeds, past_key_values_length, position_ids) position_ids = position_ids.to(inputs_embeds.device) hidden_states = inputs_embeds + position_ids hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, causal_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring class PegasusXModel(PegasusXPreTrainedModel): _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: PegasusXConfig): super().__init__(config) vocab_size = config.vocab_size embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 padding_idx = config.pad_token_id self.shared = PegasusXScaledWordEmbedding( vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale ) self.encoder = PegasusXEncoder(config, self.shared) self.decoder = PegasusXDecoder(config, self.shared) # 
Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ self.config.max_position_embeddings = new_num_position_embeddings self.encoder.resize_position_embeddings(new_num_position_embeddings) self.decoder.resize_position_embeddings(new_num_position_embeddings) def get_position_embeddings(self) -> tuple[nn.Embedding]: """ Returns the position embeddings matrix """ return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings()) @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[torch.FloatTensor]] = None, past_key_values: Optional[tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqModelOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. 
Example: ```python >>> from transformers import AutoTokenizer, PegasusModel >>> tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-large") >>> model = PegasusModel.from_pretrained("google/pegasus-x-large") >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 4, 1024] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The PEGASUS-X for conditional generation (e.g. summarization). """ ) class PegasusXForConditionalGeneration(PegasusXPreTrainedModel, GenerationMixin): base_model_prefix = "model" _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "lm_head.weight"] def __init__(self, config: PegasusXConfig): super().__init__(config) self.model = PegasusXModel(config) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. 
Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. """ self.config.max_position_embeddings = new_num_position_embeddings self.model.encoder.resize_position_embeddings(new_num_position_embeddings) self.model.decoder.resize_position_embeddings(new_num_position_embeddings) def get_position_embeddings(self) -> tuple[nn.Embedding]: """ Returns the position embeddings matrix """ return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings()) @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[torch.FloatTensor]] = None, past_key_values: Optional[tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqLMOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) lm_logits = self.lm_head(outputs[0]) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) # Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PegasusX class PegasusXDecoderWrapper(PegasusXPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. """ def __init__(self, config): super().__init__(config) self.decoder = PegasusXDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) __all__ = ["PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel"]
transformers/src/transformers/models/pegasus_x/modeling_pegasus_x.py/0
{ "file_path": "transformers/src/transformers/models/pegasus_x/modeling_pegasus_x.py", "repo_id": "transformers", "token_count": 34811 }
521
# coding=utf-8 # Copyright 2025 Meta Platforms, Inc. and the HuggingFace Inc. team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Video processor class for PerceptionLM.""" from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ) from ...processing_utils import Unpack, VideosKwargs from ...utils import is_vision_available from ...utils.import_utils import requires from ...video_processing_utils import ( BaseVideoProcessor, ) if is_vision_available(): from ...image_utils import PILImageResampling class PerceptionLMFastVideoProcessorInitKwargs(VideosKwargs): ... @requires(backends=("torchvision",)) class PerceptionLMVideoProcessor(BaseVideoProcessor): resample = PILImageResampling.BICUBIC image_mean = IMAGENET_STANDARD_MEAN image_std = IMAGENET_STANDARD_STD size = {"height": 448, "width": 448} do_resize = True do_center_crop = False do_rescale = True do_normalize = True do_convert_rgb = True valid_kwargs = PerceptionLMFastVideoProcessorInitKwargs model_input_names = ["pixel_values_videos"] def __init__(self, **kwargs: Unpack[PerceptionLMFastVideoProcessorInitKwargs]): super().__init__(**kwargs) __all__ = ["PerceptionLMVideoProcessor"]
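# A minimal sketch of running the processor on a dummy clip; the frame count and
# resolution are arbitrary assumptions, and the call follows the generic
# BaseVideoProcessor API (videos in, a BatchFeature with `pixel_values_videos` out).
if __name__ == "__main__":
    import numpy as np

    # one video of 8 random RGB frames, laid out as (num_frames, height, width, channels)
    video = np.random.randint(0, 256, size=(8, 360, 640, 3), dtype=np.uint8)

    video_processor = PerceptionLMVideoProcessor()
    batch = video_processor(videos=[video], return_tensors="pt")
    print({name: tuple(tensor.shape) for name, tensor in batch.items()})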
transformers/src/transformers/models/perception_lm/video_processing_perception_lm.py/0
{ "file_path": "transformers/src/transformers/models/perception_lm/video_processing_perception_lm.py", "repo_id": "transformers", "token_count": 574 }
522
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import re import torch from peft import LoraConfig from safetensors.torch import load_file, save_file from transformers import ( AutoProcessor, Phi4MultimodalAudioConfig, Phi4MultimodalConfig, Phi4MultimodalFeatureExtractor, Phi4MultimodalForCausalLM, Phi4MultimodalImageProcessorFast, Phi4MultimodalProcessor, Phi4MultimodalVisionConfig, ) CHAT_TEMPLATE = "{% for message in messages %}{{ '<|' + message['role'] + '|>' }}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<|image|>' }}{% elif content['type'] == 'audio' %}{{ '<|audio|>' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% endif %}{{ '<|end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}" # fmt: off STATE_DICT_MAPPING = { r"^model.embed_tokens_extend.audio_embed.encoder.encoders.(\d+).feed_forward_(in|out).net.0.linear": r"model.embed_tokens_extend.audio_embed.encoder.encoders.\1.feed_forward_\2.gate_up_proj", r"^model.embed_tokens_extend.audio_embed.encoder.encoders.(\d+).feed_forward_(in|out).net.2": r"model.embed_tokens_extend.audio_embed.encoder.encoders.\1.feed_forward_\2.down_proj", r"^model.embed_tokens_extend.audio_embed.encoder.encoders.(\d+).self_attn.linear_(q|k|v)": r"model.embed_tokens_extend.audio_embed.encoder.encoders.\1.self_attn.\2_proj", r"^model.embed_tokens_extend.audio_embed.encoder.encoders.(\d+).self_attn.linear_out": r"model.embed_tokens_extend.audio_embed.encoder.encoders.\1.self_attn.o_proj", r"^model.embed_tokens_extend.image_embed.img_projection.0": r"model.embed_tokens_extend.image_embed.img_projection_up", r"^model.embed_tokens_extend.image_embed.img_projection.2": r"model.embed_tokens_extend.image_embed.img_projection_down", r"^model.embed_tokens_extend.image_embed.glb_GN": r"model.embed_tokens_extend.image_embed.global_img_feature_extensor", r"^model.embed_tokens_extend.image_embed.sub_GN": r"model.embed_tokens_extend.image_embed.sub_img_feature_extensor", r"^model.embed_tokens_extend.audio_embed.audio_projection.speech.0": r"model.embed_tokens_extend.audio_embed.up_proj_for_speech", r"^model.embed_tokens_extend.audio_embed.audio_projection.speech.2": r"model.embed_tokens_extend.audio_embed.down_proj_for_speech", r"^model.embed_tokens_extend.audio_embed.audio_projection.vision.0": r"model.embed_tokens_extend.audio_embed.up_proj_for_vision_speech", r"^model.embed_tokens_extend.audio_embed.audio_projection.vision.2": r"model.embed_tokens_extend.audio_embed.down_proj_for_vision_speech", } # fmt: on def map_old_key_to_new(old_key): """Map of a key of the original state dict to the equivalent key in HF format""" for pattern, 
replacement in STATE_DICT_MAPPING.items(): new_key, n_replace = re.subn(pattern, replacement, old_key) # Early exit of the loop if n_replace > 0: return new_key # The state dict contains lora keys.... if "lora" in old_key: return None # This extracts the original weight before adding the lora adapter if "base_layer." in old_key: return old_key.replace("base_layer.", "") # not part of the key mapping, we keep the original name return old_key def convert_state_dict(original_state_dict: dict): """Convert a state dict file.""" new_dict = {} for old_key, tensor in original_state_dict.items(): new_key = map_old_key_to_new(old_key) if new_key is not None: new_dict[new_key] = tensor return new_dict def convert_config(original_config: dict): # Remove unused args original_config.pop("_name_or_path", None) original_config.pop("architectures", None) original_config.pop("auto_map", None) original_config.pop("vision_lora", None) original_config.pop("speech_lora", None) original_config.pop("transformers_version", None) original_config.pop("_attn_implementation", None) embd_layer = original_config.pop("embd_layer") audio_embd_layer = embd_layer["audio_embd_layer"] vision_embd_layer = embd_layer["image_embd_layer"] # Keep only some of the subdict keep_audio_embd_layer = ["downsample_rate"] keep_vision_embd_layer = ["crop_size"] audio_embd_layer = {k: v for k, v in audio_embd_layer.items() if k in keep_audio_embd_layer} vision_embd_layer = {k: v for k, v in vision_embd_layer.items() if k in keep_vision_embd_layer} audio_config = original_config.pop("audio_processor")["config"] # remove audio_config.pop("activation_checkpointing", None) audio_config.pop("cnn_layer_norm", None) audio_config.pop("input_layer", None) audio_config.pop("batch_norm", None) audio_config.pop("encoder_embedding_config", None) audio_config.pop("ext_pw_kernel_size", None) audio_config.pop("bias_in_glu", None) audio_config.pop("causal", None) # rename audio_config["hidden_size"] = audio_config.pop("attention_dim") audio_config["num_attention_heads"] = audio_config.pop("attention_heads") audio_config["intermediate_size"] = audio_config.pop("linear_units") audio_config["nemo_conv_channels"] = audio_config.pop("nemo_conv_settings")["conv_channels"] audio_config["bias_max_distance"] = audio_config.pop("relative_attention_bias_args")["t5_bias_max_distance"] # add audio_config = {**audio_config, **audio_embd_layer} # Create transformers config objects audio_config = Phi4MultimodalAudioConfig(**audio_config) vision_config = Phi4MultimodalVisionConfig(**vision_embd_layer) # Add 2nd eos to config original_config["eos_token_id"] = [199999, 200020] new_config = Phi4MultimodalConfig(**original_config, vision_config=vision_config, audio_config=audio_config) return new_config def read_json(path): with open(path, "r") as f: return json.load(f) def convert_and_write_model(input_dir: str, output_dir: str): """Convert the model and save it (this implicitly save the config as well).""" original_config = read_json(os.path.join(input_dir, "config.json")) config = convert_config(original_config) full_state_dict = {} shards = [file for file in os.listdir(input_dir) if file.endswith(".safetensors")] for shard_file in shards: original_state_dict = load_file(os.path.join(input_dir, shard_file)) new_dict = convert_state_dict(original_state_dict) full_state_dict.update(new_dict) # Load weights into model and resave them with torch.device("meta"): model = Phi4MultimodalForCausalLM(config) missing, unexpected = model.load_state_dict(full_state_dict, strict=False, 
assign=True)
    # The lm_head is missing because it's tied
    if missing != ["lm_head.weight"]:
        raise ValueError(f"Missing keys:\n{missing}")
    if len(unexpected) > 0:
        raise ValueError(f"Unexpected keys:\n{unexpected}")

    model.tie_weights()
    model.save_pretrained(output_dir)


def convert_and_save_processor(input_dir: str, output_dir: str):
    """Convert the processor."""
    original_processor = AutoProcessor.from_pretrained(input_dir, trust_remote_code=True)

    original_processor.tokenizer.extra_special_tokens = {"image_token": "<|image|>", "audio_token": "<|audio|>"}
    # We need to add those temporarily to instantiate the processor
    original_processor.tokenizer.image_token = "<|image|>"
    original_processor.tokenizer.audio_token = "<|audio|>"
    original_processor.tokenizer.image_token_id = 200010
    original_processor.tokenizer.audio_token_id = 200011

    converted_processor = Phi4MultimodalProcessor(
        tokenizer=original_processor.tokenizer,
        image_processor=Phi4MultimodalImageProcessorFast(),
        audio_processor=Phi4MultimodalFeatureExtractor(),
        chat_template=CHAT_TEMPLATE,
    )

    # We remove them before saving to avoid polluting the saved processor files
    del converted_processor.tokenizer.image_token
    del converted_processor.tokenizer.image_token_id
    del converted_processor.tokenizer.audio_token
    del converted_processor.tokenizer.audio_token_id

    # Save the processor
    converted_processor.save_pretrained(output_dir)

    # We need to rename a few tokens, but `tokenizers` does not allow doing that programmatically.
    # To avoid confusion and manual renaming, the code below loads and re-saves each json file.
    vocab = json.load(open(f"{output_dir}/vocab.json", "r"))
    vocab["<|endoftext11|>"] = "<|audio|>"
    vocab["<|endoftext10|>"] = "<|image|>"
    json.dump(vocab, open(f"{output_dir}/vocab.json", "w"))

    tokenizer = json.load(open(f"{output_dir}/tokenizer.json", "r"))
    tokenizer["added_tokens"][1]["content"] = "<|image|>"
    tokenizer["added_tokens"][2]["content"] = "<|audio|>"
    tokenizer["model"]["vocab"]["<|audio|>"] = tokenizer["model"]["vocab"]["<|endoftext11|>"]
    tokenizer["model"]["vocab"]["<|image|>"] = tokenizer["model"]["vocab"]["<|endoftext10|>"]
    del tokenizer["model"]["vocab"]["<|endoftext11|>"]
    del tokenizer["model"]["vocab"]["<|endoftext10|>"]
    json.dump(tokenizer, open(f"{output_dir}/tokenizer.json", "w"))

    tokenizer_config = json.load(open(f"{output_dir}/tokenizer_config.json", "r"))
    tokenizer_config["added_tokens_decoder"]["200010"]["content"] = "<|image|>"
    tokenizer_config["added_tokens_decoder"]["200011"]["content"] = "<|audio|>"
    json.dump(tokenizer_config, open(f"{output_dir}/tokenizer_config.json", "w"))


def extract_adapters_data(input_dir: str, output_dir: str):
    """Extract adapters data from the state dict and save weights and configs."""
    speech_lora = {}
    vision_lora = {}
    shards = [file for file in os.listdir(input_dir) if file.endswith(".safetensors")]
    for shard_file in shards:
        original_state_dict = load_file(os.path.join(input_dir, shard_file))
        for k, v in original_state_dict.items():
            if "lora" in k:
                if "speech" in k:
                    speech_lora[k.replace("speech.", "")] = v
                elif "vision" in k:
                    vision_lora[k.replace("vision.", "")] = v

    # Create and save the lora configs
    speech_lora_config = LoraConfig(
        r=320,
        lora_alpha=640,
        target_modules=r"model.layers.\d+.((self_attn.(qkv|o)_proj)|(mlp.(gate_up|down)_proj))",
        lora_dropout=0.01,
        task_type="CAUSAL_LM",
    )
    speech_lora_config.save_pretrained(os.path.join(output_dir, "speech-lora"))

    vision_lora_config = LoraConfig(
        r=256,
        lora_alpha=512,
        target_modules=r"model.layers.\d+.((self_attn.(qkv|o)_proj)|(mlp.(gate_up|down)_proj))",
lora_dropout=0.0, task_type="CAUSAL_LM", ) vision_lora_config.save_pretrained(os.path.join(output_dir, "vision-lora")) save_file(speech_lora, os.path.join(output_dir, "speech-lora", "adapter_model.safetensors")) save_file(vision_lora, os.path.join(output_dir, "vision-lora", "adapter_model.safetensors")) def main(): parser = argparse.ArgumentParser() parser.add_argument( "input_dir", help="Location of the model folder containing the weights and configs.", ) parser.add_argument( "output_dir", help="Location to write HF model.", ) args = parser.parse_args() # Convert convert_and_write_model(args.input_dir, args.output_dir) convert_and_save_processor(args.input_dir, args.output_dir) extract_adapters_data(args.input_dir, args.output_dir) if __name__ == "__main__": main()
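# Example invocation (hypothetical local paths; the two positional arguments are the
# folder with the original weights/configs and the folder to write the HF model to):
#
#   python convert_phi4_multimodal_weights_to_hf.py /path/to/original_phi4 /path/to/phi4_hf
#
# Besides the converted model and processor, the output folder will contain the
# extracted `speech-lora/` and `vision-lora/` adapter folders.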
transformers/src/transformers/models/phi4_multimodal/convert_phi4_multimodal_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/phi4_multimodal/convert_phi4_multimodal_weights_to_hf.py", "repo_id": "transformers", "token_count": 4928 }
523
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for Pix2Struct. """ from typing import Optional, Union from ...feature_extraction_utils import BatchFeature from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput from ...utils import logging class Pix2StructImagesKwargs(ImagesKwargs, total=False): max_patches: Optional[int] header_text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] class Pix2StructProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Pix2StructImagesKwargs _defaults = { "text_kwargs": { "add_special_tokens": True, "padding": False, "stride": 0, "return_overflowing_tokens": False, "return_special_tokens_mask": False, "return_offsets_mapping": False, "return_token_type_ids": False, "return_length": False, "verbose": True, }, "images_kwargs": { "max_patches": 2048, }, } logger = logging.get_logger(__name__) class Pix2StructProcessor(ProcessorMixin): r""" Constructs a PIX2STRUCT processor which wraps a BERT tokenizer and PIX2STRUCT image processor into a single processor. [`Pix2StructProcessor`] offers all the functionalities of [`Pix2StructImageProcessor`] and [`T5TokenizerFast`]. See the docstring of [`~Pix2StructProcessor.__call__`] and [`~Pix2StructProcessor.decode`] for more information. Args: image_processor (`Pix2StructImageProcessor`): An instance of [`Pix2StructImageProcessor`]. The image processor is a required input. tokenizer (Union[`T5TokenizerFast`, `T5Tokenizer`]): An instance of ['T5TokenizerFast`] or ['T5Tokenizer`]. The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "Pix2StructImageProcessor" tokenizer_class = ("T5Tokenizer", "T5TokenizerFast") def __init__(self, image_processor, tokenizer): tokenizer.return_token_type_ids = False super().__init__(image_processor, tokenizer) def __call__( self, images=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None, audio=None, videos=None, **kwargs: Unpack[Pix2StructProcessorKwargs], ) -> Union[BatchEncoding, BatchFeature]: """ This method uses [`Pix2StructImageProcessor.preprocess`] method to prepare image(s) for the model, and [`T5TokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. 
""" if images is None and text is None: raise ValueError("You have to specify either images or text.") output_kwargs = self._merge_kwargs( Pix2StructProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) add_special_tokens = output_kwargs["text_kwargs"].pop("add_special_tokens", None) # Get only text if images is None and not self.image_processor.is_vqa: output_kwargs["text_kwargs"]["add_special_tokens"] = ( add_special_tokens if add_special_tokens is not None else True ) self.current_processor = self.tokenizer text_encoding = self.tokenizer(text=text, **output_kwargs["text_kwargs"]) return text_encoding if not self.image_processor.is_vqa: # add pixel_values encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"]) else: # add pixel_values and bbox output_kwargs["images_kwargs"].setdefault("header_text", text) encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"]) if text is not None and not self.image_processor.is_vqa: output_kwargs["text_kwargs"]["add_special_tokens"] = ( add_special_tokens if add_special_tokens is not None else False ) text_encoding = self.tokenizer(text=text, **output_kwargs["text_kwargs"]) if "attention_mask" in text_encoding: text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask") if "input_ids" in text_encoding: text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids") else: text_encoding = None if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names decoder_ids = ["decoder_attention_mask", "decoder_input_ids"] return tokenizer_input_names + image_processor_input_names + decoder_ids __all__ = ["Pix2StructProcessor"]
transformers/src/transformers/models/pix2struct/processing_pix2struct.py/0
{ "file_path": "transformers/src/transformers/models/pix2struct/processing_pix2struct.py", "repo_id": "transformers", "token_count": 2324 }
524
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert PoolFormer checkpoints from the original repository. URL: https://github.com/sail-sg/poolformer""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def replace_key_with_offset(key, offset, original_name, new_name): """ Replaces the key by subtracting the offset from the original layer number """ to_find = original_name.split(".")[0] key_list = key.split(".") orig_block_num = int(key_list[key_list.index(to_find) - 2]) layer_num = int(key_list[key_list.index(to_find) - 1]) new_block_num = orig_block_num - offset key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}") return key def rename_keys(state_dict): new_state_dict = OrderedDict() total_embed_found, patch_emb_offset = 0, 0 for key, value in state_dict.items(): if key.startswith("network"): key = key.replace("network", "poolformer.encoder") if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias") and "patch_embed" not in key: patch_emb_offset += 1 to_replace = key[: key.find("proj")] key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.") key = key.replace("proj", "projection") if key.endswith("bias"): total_embed_found += 1 if "patch_embeddings" in key: key = "poolformer.encoder." + key if "mlp.fc1" in key: key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1") if "mlp.fc2" in key: key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2") if "norm1" in key: key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm") if "norm2" in key: key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm") if "layer_scale_1" in key: key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1") if "layer_scale_2" in key: key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2") if "head" in key: key = key.replace("head", "classifier") new_state_dict[key] = value return new_state_dict # We will verify our results on a COCO image def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @torch.no_grad() def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path): """ Copy/paste/tweak model's weights to our PoolFormer structure. 
""" # load default PoolFormer configuration config = PoolFormerConfig() # set attributes based on model_name repo_id = "huggingface/label-files" size = model_name[-3:] config.num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) # set config attributes id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} if size == "s12": config.depths = [2, 2, 6, 2] config.hidden_sizes = [64, 128, 320, 512] config.mlp_ratio = 4.0 crop_pct = 0.9 elif size == "s24": config.depths = [4, 4, 12, 4] config.hidden_sizes = [64, 128, 320, 512] config.mlp_ratio = 4.0 crop_pct = 0.9 elif size == "s36": config.depths = [6, 6, 18, 6] config.hidden_sizes = [64, 128, 320, 512] config.mlp_ratio = 4.0 config.layer_scale_init_value = 1e-6 crop_pct = 0.9 elif size == "m36": config.depths = [6, 6, 18, 6] config.hidden_sizes = [96, 192, 384, 768] config.mlp_ratio = 4.0 config.layer_scale_init_value = 1e-6 crop_pct = 0.95 elif size == "m48": config.depths = [8, 8, 24, 8] config.hidden_sizes = [96, 192, 384, 768] config.mlp_ratio = 4.0 config.layer_scale_init_value = 1e-6 crop_pct = 0.95 else: raise ValueError(f"Size {size} not supported") # load image processor image_processor = PoolFormerImageProcessor(crop_pct=crop_pct) # Prepare image image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values logger.info(f"Converting model {model_name}...") # load original state dict state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"), weights_only=True) # rename keys state_dict = rename_keys(state_dict) # create HuggingFace model and load state dict model = PoolFormerForImageClassification(config) model.load_state_dict(state_dict) model.eval() # Define image processor image_processor = PoolFormerImageProcessor(crop_pct=crop_pct) pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values # forward pass outputs = model(pixel_values) logits = outputs.logits # define expected logit slices for different models if size == "s12": expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869]) elif size == "s24": expected_slice = torch.tensor([0.4402, -0.1374, -0.8045]) elif size == "s36": expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898]) elif size == "m36": expected_slice = torch.tensor([0.3952, 0.2263, -1.2668]) elif size == "m48": expected_slice = torch.tensor([0.1167, -0.0656, -0.3423]) else: raise ValueError(f"Size {size} not supported") # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2) # finally, save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...") Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) print(f"Saving image processor to {pytorch_dump_folder_path}") image_processor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--model_name", default="poolformer_s12", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." 
) args = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
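# Example invocation (hypothetical local paths; the original .pth checkpoint comes from
# https://github.com/sail-sg/poolformer):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/poolformer_s12_hf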
transformers/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py", "repo_id": "transformers", "token_count": 3265 }
525
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_prompt_depth_anything.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # Copyright 2025 The HuggingFace Team. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Union import torch import torch.nn as nn from transformers.utils.generic import torch_int from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring from ...utils.backbone_utils import load_backbone from .configuration_prompt_depth_anything import PromptDepthAnythingConfig class PromptDepthAnythingLayer(nn.Module): def __init__(self, config: PromptDepthAnythingConfig): super().__init__() self.convolution1 = nn.Conv2d( 1, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) self.activation1 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) self.activation2 = nn.ReLU() self.convolution3 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) def forward(self, prompt_depth: torch.Tensor) -> torch.Tensor: hidden_state = self.convolution1(prompt_depth) hidden_state = self.activation1(hidden_state) hidden_state = self.convolution2(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution3(hidden_state) return hidden_state class PromptDepthAnythingPreActResidualLayer(nn.Module): """ ResidualConvUnit, pre-activate residual unit. Args: config (`[PromptDepthAnythingConfig]`): Model configuration class defining the model architecture. """ def __init__(self, config): super().__init__() self.activation1 = nn.ReLU() self.convolution1 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) self.activation2 = nn.ReLU() self.convolution2 = nn.Conv2d( config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True, ) def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: residual = hidden_state hidden_state = self.activation1(hidden_state) hidden_state = self.convolution1(hidden_state) hidden_state = self.activation2(hidden_state) hidden_state = self.convolution2(hidden_state) return hidden_state + residual class PromptDepthAnythingFeatureFusionLayer(nn.Module): """Feature fusion layer, merges feature maps from different stages. Args: config (`[PromptDepthAnythingConfig]`): Model configuration class defining the model architecture. 
""" def __init__(self, config: PromptDepthAnythingConfig): super().__init__() self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True) self.residual_layer1 = PromptDepthAnythingPreActResidualLayer(config) self.residual_layer2 = PromptDepthAnythingPreActResidualLayer(config) self.prompt_depth_layer = PromptDepthAnythingLayer(config) def forward(self, hidden_state, residual=None, size=None, prompt_depth=None): if residual is not None: if hidden_state.shape != residual.shape: residual = nn.functional.interpolate( residual, size=hidden_state.shape[2:], mode="bilinear", align_corners=False ) hidden_state = hidden_state + self.residual_layer1(residual) hidden_state = self.residual_layer2(hidden_state) if prompt_depth is not None: prompt_depth = nn.functional.interpolate( prompt_depth, size=hidden_state.shape[2:], mode="bilinear", align_corners=False ) res = self.prompt_depth_layer(prompt_depth) hidden_state = hidden_state + res modifier = {"scale_factor": 2} if size is None else {"size": size} hidden_state = nn.functional.interpolate( hidden_state, **modifier, mode="bilinear", align_corners=True, ) hidden_state = self.projection(hidden_state) return hidden_state class PromptDepthAnythingFeatureFusionStage(nn.Module): def __init__(self, config): super().__init__() self.layers = nn.ModuleList() for _ in range(len(config.neck_hidden_sizes)): self.layers.append(PromptDepthAnythingFeatureFusionLayer(config)) def forward(self, hidden_states, size=None, prompt_depth=None): # reversing the hidden_states, we start from the last hidden_states = hidden_states[::-1] fused_hidden_states = [] fused_hidden_state = None for idx, (hidden_state, layer) in enumerate(zip(hidden_states, self.layers)): size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None if fused_hidden_state is None: # first layer only uses the last hidden_state fused_hidden_state = layer(hidden_state, size=size, prompt_depth=prompt_depth) else: fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size, prompt_depth=prompt_depth) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states class PromptDepthAnythingDepthEstimationHead(nn.Module): """ Output head consisting of 3 convolutional layers. It progressively halves the feature dimension and upsamples the predictions to the input resolution after the first convolutional layer (details can be found in the DPT paper's supplementary material). The final activation function is either ReLU or Sigmoid, depending on the depth estimation type (relative or metric). For metric depth estimation, the output is scaled by the maximum depth used during pretraining. 
""" def __init__(self, config): super().__init__() self.head_in_index = config.head_in_index self.patch_size = config.patch_size features = config.fusion_hidden_size self.conv1 = nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1) self.conv2 = nn.Conv2d(features // 2, config.head_hidden_size, kernel_size=3, stride=1, padding=1) self.activation1 = nn.ReLU() self.conv3 = nn.Conv2d(config.head_hidden_size, 1, kernel_size=1, stride=1, padding=0) if config.depth_estimation_type == "relative": self.activation2 = nn.ReLU() elif config.depth_estimation_type == "metric": self.activation2 = nn.Sigmoid() else: raise ValueError(f"Unknown depth estimation type: {config.depth_estimation_type}") self.max_depth = config.max_depth def forward(self, hidden_states: list[torch.Tensor], patch_height: int, patch_width: int) -> torch.Tensor: hidden_states = hidden_states[-1] predicted_depth = self.conv1(hidden_states) target_height = torch_int(patch_height * self.patch_size) target_width = torch_int(patch_width * self.patch_size) predicted_depth = nn.functional.interpolate( predicted_depth, (target_height, target_width), mode="bilinear", align_corners=True, ) predicted_depth = self.conv2(predicted_depth) predicted_depth = self.activation1(predicted_depth) predicted_depth = self.conv3(predicted_depth) predicted_depth = self.activation2(predicted_depth) # (batch_size, 1, height, width) -> (batch_size, height, width), which # keeps the same behavior as Depth Anything v1 & v2 predicted_depth = predicted_depth.squeeze(dim=1) return predicted_depth @auto_docstring class PromptDepthAnythingPreTrainedModel(PreTrainedModel): config: PromptDepthAnythingConfig base_model_prefix = "prompt_depth_anything" main_input_name = "pixel_values" supports_gradient_checkpointing = True class PromptDepthAnythingReassembleLayer(nn.Module): def __init__(self, config: PromptDepthAnythingConfig, channels: int, factor: int): super().__init__() self.projection = nn.Conv2d(in_channels=config.reassemble_hidden_size, out_channels=channels, kernel_size=1) # up/down sampling depending on factor if factor > 1: self.resize = nn.ConvTranspose2d(channels, channels, kernel_size=factor, stride=factor, padding=0) elif factor == 1: self.resize = nn.Identity() elif factor < 1: # so should downsample stride = torch_int(1 / factor) self.resize = nn.Conv2d(channels, channels, kernel_size=3, stride=stride, padding=1) def forward(self, hidden_state): hidden_state = self.projection(hidden_state) hidden_state = self.resize(hidden_state) return hidden_state class PromptDepthAnythingReassembleStage(nn.Module): """ This class reassembles the hidden states of the backbone into image-like feature representations at various resolutions. This happens in 3 stages: 1. Take the patch embeddings and reshape them to image-like feature representations. 2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`. 3. Resizing the spatial dimensions (height, width). Args: config (`[PromptDepthAnythingConfig]`): Model configuration class defining the model architecture. 
""" def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList() for channels, factor in zip(config.neck_hidden_sizes, config.reassemble_factors): self.layers.append(PromptDepthAnythingReassembleLayer(config, channels=channels, factor=factor)) def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`): List of hidden states from the backbone. """ out = [] for i, hidden_state in enumerate(hidden_states): # reshape to (batch_size, num_channels, height, width) hidden_state = hidden_state[:, 1:] batch_size, _, num_channels = hidden_state.shape hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels) hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() hidden_state = self.layers[i](hidden_state) out.append(hidden_state) return out class PromptDepthAnythingNeck(nn.Module): """ PromptDepthAnythingNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as input and produces another list of tensors as output. For PromptDepthAnything, it includes 2 stages: * PromptDepthAnythingReassembleStage * PromptDepthAnythingFeatureFusionStage. Args: config (dict): config dict. """ def __init__(self, config): super().__init__() self.config = config self.reassemble_stage = PromptDepthAnythingReassembleStage(config) self.convs = nn.ModuleList() for channel in config.neck_hidden_sizes: self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False)) # fusion self.fusion_stage = PromptDepthAnythingFeatureFusionStage(config) def forward( self, hidden_states: list[torch.Tensor], patch_height: Optional[int] = None, patch_width: Optional[int] = None, prompt_depth: Optional[torch.Tensor] = None, ) -> list[torch.Tensor]: """ Args: hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone. """ if not isinstance(hidden_states, (tuple, list)): raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") # postprocess hidden states hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width) features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)] # fusion blocks output = self.fusion_stage(features, prompt_depth=prompt_depth) return output @auto_docstring( custom_intro=""" Prompt Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2. 
""" ) class PromptDepthAnythingForDepthEstimation(PromptDepthAnythingPreTrainedModel): _no_split_modules = ["DPTViTEmbeddings"] def __init__(self, config): super().__init__(config) self.backbone = load_backbone(config) self.neck = PromptDepthAnythingNeck(config) self.head = PromptDepthAnythingDepthEstimationHead(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: torch.FloatTensor, prompt_depth: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], DepthEstimatorOutput]: r""" prompt_depth (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*): Prompt depth is the sparse or low-resolution depth obtained from multi-view geometry or a low-resolution depth sensor. It generally has shape (height, width), where height and width can be smaller than those of the images. It is optional and can be None, which means no prompt depth will be used. If it is None, the output will be a monocular relative depth. The values are recommended to be in meters, but this is not necessary. Example: ```python >>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") >>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/prompt-depth-anything-vits-hf") >>> prompt_depth_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true" >>> prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth) >>> with torch.no_grad(): ... outputs = model(**inputs) >>> # interpolate to original size >>> post_processed_output = image_processor.post_process_depth_estimation( ... outputs, ... target_sizes=[(image.height, image.width)], ... ) >>> # visualize the prediction >>> predicted_depth = post_processed_output[0]["predicted_depth"] >>> depth = predicted_depth * 1000. 
>>> depth = depth.detach().cpu().numpy() >>> depth = Image.fromarray(depth.astype("uint16")) # mm ``` """ loss = None if labels is not None: raise NotImplementedError("Training is not implemented yet") return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions outputs = self.backbone.forward_with_filtered_kwargs( pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions ) hidden_states = outputs.feature_maps _, _, height, width = pixel_values.shape patch_size = self.config.patch_size patch_height = height // patch_size patch_width = width // patch_size if prompt_depth is not None: # normalize prompt depth batch_size = prompt_depth.shape[0] depth_min = torch.min(prompt_depth.reshape(batch_size, -1), dim=1).values depth_max = torch.max(prompt_depth.reshape(batch_size, -1), dim=1).values depth_min, depth_max = depth_min.view(batch_size, 1, 1, 1), depth_max.view(batch_size, 1, 1, 1) prompt_depth = (prompt_depth - depth_min) / (depth_max - depth_min) # normalize done hidden_states = self.neck(hidden_states, patch_height, patch_width, prompt_depth=prompt_depth) predicted_depth = self.head(hidden_states, patch_height, patch_width) if prompt_depth is not None: # denormalize predicted depth depth_min = depth_min.squeeze(1).to(predicted_depth.device) depth_max = depth_max.squeeze(1).to(predicted_depth.device) predicted_depth = predicted_depth * (depth_max - depth_min) + depth_min # denormalize done if not return_dict: if output_hidden_states: output = (predicted_depth,) + outputs[1:] else: output = (predicted_depth,) + outputs[2:] return ((loss,) + output) if loss is not None else output return DepthEstimatorOutput( loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions, ) __all__ = ["PromptDepthAnythingForDepthEstimation", "PromptDepthAnythingPreTrainedModel"]
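# --- Illustrative sketch (appended annotation, not part of the library file above) ---
# PromptDepthAnythingForDepthEstimation.forward() min-max normalizes the prompt depth
# per sample before it enters the neck, then maps the head output back to the prompt's
# metric range afterwards. The standalone helpers below reproduce only that round trip
# on dummy tensors; the function names and tensor sizes are illustrative assumptions,
# not part of the model's API.
import torch


def normalize_prompt_depth(prompt_depth: torch.Tensor):
    # prompt_depth: (batch_size, 1, height, width), e.g. metric depth in meters
    batch_size = prompt_depth.shape[0]
    depth_min = torch.min(prompt_depth.reshape(batch_size, -1), dim=1).values.view(batch_size, 1, 1, 1)
    depth_max = torch.max(prompt_depth.reshape(batch_size, -1), dim=1).values.view(batch_size, 1, 1, 1)
    # assumes depth_max > depth_min, i.e. the prompt is not constant
    normalized = (prompt_depth - depth_min) / (depth_max - depth_min)
    return normalized, depth_min, depth_max


def denormalize_predicted_depth(predicted_depth, depth_min, depth_max):
    # predicted_depth: (batch_size, height, width) in [0, 1]; keep the stats broadcastable
    return predicted_depth * (depth_max - depth_min).squeeze(1) + depth_min.squeeze(1)


if __name__ == "__main__":
    prompt = torch.rand(2, 1, 24, 32) * 5.0     # fake low-resolution prompt depth
    normalized, d_min, d_max = normalize_prompt_depth(prompt)
    fake_head_output = torch.rand(2, 192, 256)  # stand-in for the sigmoid head output
    metric_depth = denormalize_predicted_depth(fake_head_output, d_min, d_max)
    print(metric_depth.shape)                   # torch.Size([2, 192, 256])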
transformers/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py/0
{ "file_path": "transformers/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py", "repo_id": "transformers", "token_count": 8373 }
526
# coding=utf-8 # Copyright 2024 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, # Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch PVTv2 model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import auto_docstring, logging from ...utils.backbone_utils import BackboneMixin from .configuration_pvt_v2 import PvtV2Config logger = logging.get_logger(__name__) # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt class PvtV2DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class PvtV2OverlapPatchEmbeddings(nn.Module): """Image to Patch Embedding""" def __init__(self, config: PvtV2Config, layer_idx: int): super().__init__() patch_size = config.patch_sizes[layer_idx] patch_size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size stride = config.strides[layer_idx] num_channels = config.num_channels if layer_idx == 0 else config.hidden_sizes[layer_idx - 1] hidden_size = config.hidden_sizes[layer_idx] self.patch_size = patch_size self.proj = nn.Conv2d( num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2), ) self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) def forward(self, pixel_values): embeddings = self.proj(pixel_values) _, _, height, width = embeddings.shape embeddings = embeddings.flatten(2).transpose(1, 2) embeddings = self.layer_norm(embeddings) return embeddings, height, width class PvtV2DepthWiseConv(nn.Module): """ Depth-wise (DW) convolution to infuse positional information using zero-padding. Depth-wise convolutions have an equal number of groups to the number of input channels, meaning one filter per input channel. This reduces the overall parameters and compute costs since the key purpose of this layer is position encoding. 
""" def __init__(self, config: PvtV2Config, dim: int = 768): super().__init__() self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) def forward(self, hidden_states, height, width): batch_size, seq_len, num_channels = hidden_states.shape hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) hidden_states = self.dwconv(hidden_states) hidden_states = hidden_states.flatten(2).transpose(1, 2) return hidden_states class PvtV2SelfAttention(nn.Module): """Efficient self-attention mechanism.""" def __init__(self, config: PvtV2Config, hidden_size: int, num_attention_heads: int, spatial_reduction_ratio: int): super().__init__() self.linear_attention = config.linear_attention self.pruned_heads = set() self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads if self.hidden_size % self.num_attention_heads != 0: raise ValueError( f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " f"heads ({self.num_attention_heads})" ) self.attention_head_size = int(self.hidden_size / self.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) self.attn_drop = nn.Dropout(config.attention_probs_dropout_prob) self.proj = nn.Linear(self.hidden_size, self.hidden_size) self.proj_drop = nn.Dropout(config.hidden_dropout_prob) self.spatial_reduction_ratio = spatial_reduction_ratio if self.linear_attention: self.pool = nn.AdaptiveAvgPool2d(7) self.spatial_reduction = nn.Conv2d(self.hidden_size, self.hidden_size, kernel_size=1, stride=1) self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) self.act = nn.GELU() elif spatial_reduction_ratio > 1: self.spatial_reduction = nn.Conv2d( self.hidden_size, self.hidden_size, kernel_size=spatial_reduction_ratio, stride=spatial_reduction_ratio ) self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) def transpose_for_scores(self, hidden_states) -> torch.Tensor: new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) hidden_states = hidden_states.view(new_shape) return hidden_states.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False, ) -> tuple[torch.Tensor]: batch_size, seq_len, num_channels = hidden_states.shape query_layer = self.transpose_for_scores(self.query(hidden_states)) if self.linear_attention: hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = ( self.spatial_reduction(self.pool(hidden_states)).reshape(batch_size, num_channels, -1).permute(0, 2, 1) ) hidden_states = self.act(self.layer_norm(hidden_states)) elif self.spatial_reduction_ratio > 1: hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) hidden_states = ( self.spatial_reduction(hidden_states).reshape(batch_size, num_channels, -1).permute(0, 2, 1) ) hidden_states = self.layer_norm(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.attn_drop(attention_probs) context_layer = (attention_probs @ value_layer).transpose(1, 2).reshape(batch_size, seq_len, num_channels) context_layer = self.proj(context_layer) context_layer = self.proj_drop(context_layer) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads ) # Prune linear layers self.query = prune_linear_layer(self.query, index) self.key = prune_linear_layer(self.key, index) self.value = prune_linear_layer(self.value, index) self.proj = prune_linear_layer(self.proj, index, dim=1) # Update hyper params and store pruned heads self.num_attention_heads = self.num_attention_heads - len(heads) self.all_head_size = self.attention_head_size * self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) class PvtV2ConvFeedForwardNetwork(nn.Module): def __init__( self, config: PvtV2Config, in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, ): super().__init__() out_features = out_features if out_features is not None else in_features self.dense1 = nn.Linear(in_features, hidden_features) self.dwconv = PvtV2DepthWiseConv(config, hidden_features) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.dense2 = nn.Linear(hidden_features, out_features) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.relu = nn.ReLU() if config.linear_attention else nn.Identity() def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.relu(hidden_states) hidden_states = self.dwconv(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.dense2(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class PvtV2BlockLayer(nn.Module): def __init__(self, config: PvtV2Config, layer_idx: int, drop_path: float = 0.0): super().__init__() hidden_size: int = config.hidden_sizes[layer_idx] num_attention_heads: int = config.num_attention_heads[layer_idx] spatial_reduction_ratio: int = config.sr_ratios[layer_idx] mlp_ratio: float = config.mlp_ratios[layer_idx] self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) self.attention = PvtV2SelfAttention( config=config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, spatial_reduction_ratio=spatial_reduction_ratio, ) self.drop_path = PvtV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) mlp_hidden_size = int(hidden_size * mlp_ratio) self.mlp = PvtV2ConvFeedForwardNetwork(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False): 
self_attention_outputs = self.attention( hidden_states=self.layer_norm_1(hidden_states), height=height, width=width, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] attention_output = self.drop_path(attention_output) hidden_states = attention_output + hidden_states mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) mlp_output = self.drop_path(mlp_output) layer_output = hidden_states + mlp_output outputs = (layer_output,) + outputs return outputs class PvtV2EncoderLayer(GradientCheckpointingLayer): def __init__(self, config: PvtV2Config, layer_idx: int): super().__init__() self.patch_embedding = PvtV2OverlapPatchEmbeddings( config=config, layer_idx=layer_idx, ) # Transformer block # stochastic depth decay rule drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").tolist() block_layers = [] for block_idx in range(config.depths[layer_idx]): block_layers.append( PvtV2BlockLayer( config=config, layer_idx=layer_idx, drop_path=drop_path_decays[sum(config.depths[:layer_idx]) + block_idx], ) ) self.blocks = nn.ModuleList(block_layers) # Layer norm self.layer_norm = nn.LayerNorm(config.hidden_sizes[layer_idx], eps=config.layer_norm_eps) def forward(self, hidden_states, output_attentions): all_self_attentions = () if output_attentions else None # first, obtain patch embeddings hidden_states, height, width = self.patch_embedding(hidden_states) # second, send embeddings through blocks for block in self.blocks: layer_outputs = block(hidden_states, height, width, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions += (layer_outputs[1],) # third, apply layer norm hidden_states = self.layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (all_self_attentions,) return outputs, height, width class PvtV2Encoder(nn.Module): def __init__(self, config: PvtV2Config): super().__init__() self.config = config self.gradient_checkpointing = False # encoder layers self.layers = nn.ModuleList([PvtV2EncoderLayer(config, i) for i in range(config.num_encoder_blocks)]) def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None batch_size = pixel_values.shape[0] hidden_states = pixel_values for idx, layer in enumerate(self.layers): layer_output = layer(hidden_states, output_attentions) outputs, height, width = layer_output hidden_states = outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (outputs[1],) # reshape back to (batch_size, num_channels, height, width) hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class PvtV2PreTrainedModel(PreTrainedModel): config: PvtV2Config base_model_prefix = "pvt_v2" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> 
None: """Initialize the weights""" if isinstance(module, nn.Linear): # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid # `trunc_normal_cpu` not implemented in `half` issues module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() @auto_docstring class PvtV2Model(PvtV2PreTrainedModel): def __init__(self, config: PvtV2Config): super().__init__(config) self.config = config # hierarchical Transformer encoder self.encoder = PvtV2Encoder(config) # Initialize weights and apply final processing self.post_init() def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" Pvt-v2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. """ ) class PvtV2ForImageClassification(PvtV2PreTrainedModel): def __init__(self, config: PvtV2Config) -> None: super().__init__(config) self.num_labels = config.num_labels self.pvt_v2 = PvtV2Model(config) # Classifier head self.classifier = ( nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor], labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.pvt_v2( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # convert last hidden states to (batch_size, height*width, hidden_size) batch_size = sequence_output.shape[0] # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) sequence_output = sequence_output.permute(0, 2, 3, 1) sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1]) # global average pooling sequence_output = sequence_output.mean(dim=1) logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" PVTv2 backbone, to be used with frameworks like DETR and MaskFormer. """ ) class PvtV2Backbone(PvtV2Model, BackboneMixin): def __init__(self, config: PvtV2Config): super().__init__(config) super()._init_backbone(config) self.num_features = config.hidden_sizes @auto_docstring def forward( self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> BackboneOutput: r""" Examples: ```python >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") >>> model = AutoBackbone.from_pretrained( ... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"] ... 
) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps >>> list(feature_maps[-1].shape) [1, 256, 7, 7] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) outputs = self.encoder( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict, ) hidden_states = outputs.hidden_states feature_maps = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: output = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, ) __all__ = ["PvtV2ForImageClassification", "PvtV2Model", "PvtV2PreTrainedModel", "PvtV2Backbone"]
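# --- Illustrative sketch (appended annotation, not part of the library file above) ---
# The drop_path() docstring above describes stochastic depth: during training, entire
# samples are dropped from a residual branch and the survivors are rescaled by
# 1 / keep_prob so the branch keeps the same expected value. The standalone function
# below repeats that formula on a toy tensor to make the rescaling visible; it is an
# illustration under those assumptions, not library code.
import torch


def drop_path_sketch(x: torch.Tensor, drop_prob: float) -> torch.Tensor:
    keep_prob = 1.0 - drop_prob
    # one Bernoulli(keep_prob) draw per sample, broadcast over every remaining dim
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    return x.div(keep_prob) * mask


if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.ones(10_000, 4)
    out = drop_path_sketch(x, drop_prob=0.3)
    # roughly 70% of rows survive and are scaled by 1/0.7, so the mean stays close to 1.0
    print(out.mean().item())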
transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py/0
{ "file_path": "transformers/src/transformers/models/pvt_v2/modeling_pvt_v2.py", "repo_id": "transformers", "token_count": 11351 }
527
# coding=utf-8 # Copyright 2024 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RecurrentGemma model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithNoAttention, CausalLMOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.import_utils import is_torchdynamo_compiling from .configuration_recurrent_gemma import RecurrentGemmaConfig logger = logging.get_logger(__name__) _MAX_SQRT_GRADIENT = 1000.0 # Copied from transformers.models.gemma.modeling_gemma.GemmaRMSNorm with Gemma->RecurrentGemma class RecurrentGemmaRMSNorm(nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.zeros(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()) # Llama does x.to(float16) * w whilst RecurrentGemma is (x * w).to(float16) # See https://github.com/huggingface/transformers/pull/29402 output = output * (1.0 + self.weight.float()) return output.type_as(x) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.eps}" class RecurrentGemmaRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, dim, base=10000, device=None): super().__init__() self.dim = dim self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim)) self.register_buffer("inv_freq", tensor=inv_freq, persistent=False) @torch.no_grad() def forward(self, x, position_ids, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] self.inv_freq.to(x.device) inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) position_ids_expanded = position_ids[:, None, :].float() # Force float32 since bfloat16 loses precision on long contexts # See https://github.com/huggingface/transformers/pull/29285 device_type = x.device.type device_type = device_type if device_type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() sin = emb.sin() return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, 
unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class RecurrentGemmaSdpaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: RecurrentGemmaConfig): super().__init__() self.config = config self.attention_dropout = config.attention_dropout self.hidden_size = config.hidden_size self.num_attention_heads = config.num_attention_heads self.head_dim = config.head_dim self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads self.partial_rotary_factor = config.partial_rotary_factor self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=config.attention_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=True) self.rotary_emb = RecurrentGemmaRotaryEmbedding( int(self.partial_rotary_factor * self.head_dim), base=config.rope_theta, ) def forward( self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, cache_position: Optional[torch.LongTensor] = None, use_cache: bool = False, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = 
query_states.view(bsz, q_len, self.num_attention_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) cos, sin = self.rotary_emb(value_states, position_ids) # Partial rotary embedding query_rot, query_pass = torch.chunk(query_states, int(1 / self.partial_rotary_factor), dim=-1) key_rot, key_pass = torch.chunk(key_states, int(1 / self.partial_rotary_factor), dim=-1) query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if use_cache and hasattr(self, "key_states"): cache_kwargs = {"cache_position": cache_position} key_states, value_states = self._update_cache(key_states, value_states, **cache_kwargs) key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] attn_output = torch.nn.functional.scaled_dot_product_attention( query_states.contiguous(), key_states.contiguous(), value_states.contiguous(), attn_mask=causal_mask, # pretty much a must for sliding window backend! dropout_p=self.attention_dropout if self.training else 0.0, scale=self.head_dim**-0.5, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output def _setup_cache(self, batch_size, device, dtype=None): if dtype is None and self.config.dtype is not None: dtype = self.config.dtype dtype = dtype if dtype is not None else torch.float32 cache_shape = (batch_size, self.num_key_value_heads, self.config.attention_window_size, self.head_dim) self.value_states = torch.zeros(cache_shape, dtype=dtype, device=device) self.key_states = torch.zeros(cache_shape, dtype=dtype, device=device) @torch.no_grad() def _update_cache(self, key_states, value_states, **cache_kwargs): """ torch.compile compatible sliding window. Computes the `indices` based on `cache_position >= self.config.attention_window_size - 1`. The `to_shift` is only true once we are above attention_window_size. Thus with `attention_window_size==64`: indices = (slicing + to_shift[-1].int()-1) % self.config.attention_window_size tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 0]) We overwrite the cache using these, then we always write at cache_position (clamped to `attention_window_size`) """ cache_position = cache_kwargs.get("cache_position") if cache_position.shape[0] > self.config.attention_window_size: # int indexing -> device sync? 
in compile, use tensor k_out = key_states[:, :, -self.config.attention_window_size :, :] v_out = value_states[:, :, -self.config.attention_window_size :, :] else: slicing = torch.ones( self.config.attention_window_size, dtype=torch.long, device=value_states.device ).cumsum(0) cache_position = cache_position.clamp(0, self.config.attention_window_size - 1) to_shift = cache_position >= self.config.attention_window_size - 1 indices = (slicing + to_shift[-1].int() - 1) % self.config.attention_window_size k_out, v_out = self.key_states.to(key_states.device), self.value_states.to(value_states.device) k_out = k_out[:, :, indices] v_out = v_out[:, :, indices] k_out[:, :, cache_position] = key_states.to(k_out.dtype) v_out[:, :, cache_position] = value_states.to(v_out.dtype) self.key_states, self.value_states = k_out, v_out return k_out, v_out class SqrtBoundDerivative(torch.autograd.Function): """Computes a square root with a gradient clipped at `_MAX_SQRT_GRADIENT`.""" @staticmethod def forward(ctx, x: torch.Tensor) -> torch.Tensor: """The forward pass, which is a normal `sqrt`.""" ctx.save_for_backward(x) return torch.sqrt(x) @staticmethod def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: """The backward pass, which clips the `sqrt` gradient.""" (x,) = ctx.saved_tensors clipped_x_times_4 = torch.clip(4.0 * x, min=1 / (_MAX_SQRT_GRADIENT**2)) return grad_output / torch.sqrt(clipped_x_times_4) class RecurrentGemmaRglru(nn.Module): """A Real-Gated Linear Recurrent Unit (RG-LRU) layer.""" def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.block_width = config.lru_width // self.num_attention_heads self.recurrent_param = nn.Parameter(torch.empty([config.lru_width])) self.input_gate_weight = nn.Parameter( torch.empty([self.num_attention_heads, self.block_width, self.block_width]) ) self.input_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width])) self.recurrent_gate_weight = nn.Parameter( torch.empty([self.num_attention_heads, self.block_width, self.block_width]) ) self.recurrent_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width])) self.recurrent_states = None def forward( self, activations: torch.Tensor, position_ids: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: batch_size, seq_len, lru_width = activations.shape reset = position_ids[:, :, None] == 0 reshape_act = activations.reshape(batch_size * seq_len, self.num_attention_heads, self.block_width) reshape_act = reshape_act.permute(1, 0, 2) res = torch.baddbmm(self.input_gate_bias[:, None, :], reshape_act, self.input_gate_weight) input_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width)) res = torch.baddbmm(self.recurrent_gate_bias[:, None, :], reshape_act, self.recurrent_gate_weight) recurrent_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width)) # Compute the parameter `A` of the recurrence. log_recurrent_gate = -8.0 * recurrent_gate * nn.functional.softplus(self.recurrent_param) recurrent_gate = torch.exp(log_recurrent_gate) a_square = torch.exp(2 * log_recurrent_gate) # Gate the input. gated_inputs = activations * input_gate # Apply gamma normalization to the input. We need to clip the derivatives of # `sqrt` in order to prevent NaNs during training in bfloat16. 
TODO a bit annoying multiplier = 1 tracing = isinstance(activations, torch.fx.Proxy) or is_torchdynamo_compiling() if not torch.jit.is_tracing() and not tracing: multiplier = SqrtBoundDerivative.apply(1 - a_square) multiplier = reset + ~reset * multiplier normalized_x = gated_inputs * multiplier.type(activations.dtype) hidden_states, recurrent_states = self._rnn_scan( hidden_states=normalized_x, recurrent_gate=recurrent_gate, reset=reset, recurrent_states=self.recurrent_states, ) self.recurrent_states = recurrent_states return hidden_states # TODO refactor def _rnn_scan( self, hidden_states: torch.Tensor, recurrent_gate: torch.Tensor, reset: torch.Tensor, recurrent_states: Union[torch.Tensor, None], acc_dtype: torch.dtype = torch.float32, ) -> tuple[torch.Tensor, torch.Tensor]: """Runs the recurrence of a linear RNN. Args: hidden_states: The input sequence. recurrent_gate: The diagonal of the recurrence matrix `A`. reset: Indicator of document boundaries, e.g. when to reset the hidden state of the RNN. recurrent_states: The initial hidden state. acc_dtype: The data type for the accumulation. Returns: The output of the linear recurrence. """ # Multiply `a` by the reset. recurrent_gate = recurrent_gate * ~reset if hidden_states.shape[1] == 1: # Using scan in sampling mode. if recurrent_states is None: # same here, when decoding you always have cache return hidden_states, hidden_states[:, 0].type(acc_dtype) else: contextualized_states = recurrent_gate.type(acc_dtype) * recurrent_states[:, None].to( recurrent_gate.device ) contextualized_states += hidden_states.type(acc_dtype) return contextualized_states.type(hidden_states.dtype), contextualized_states[:, -1] else: # Using scan in linear mode. if recurrent_states is None: recurrent_states = torch.zeros(hidden_states[:, 0].shape, dtype=acc_dtype, device=hidden_states.device) contextualized_states = torch.zeros_like(hidden_states) for t in range(hidden_states.shape[1]): recurrent_states = recurrent_gate[:, t].type(acc_dtype) * recurrent_states.to(recurrent_gate.device) recurrent_states = recurrent_states + hidden_states[:, t].type(acc_dtype) contextualized_states[:, t] = recurrent_states.type(hidden_states.dtype) return contextualized_states, recurrent_states class RecurrentGemmaRecurrentBlock(nn.Module): """Griffin and Hawk's recurrent block.""" def __init__(self, config): super().__init__() self.lru_width = config.lru_width self.hidden_size = config.hidden_size self.linear_y = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width) self.linear_x = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width) self.linear_out = nn.Linear(in_features=config.lru_width, out_features=config.hidden_size) self.conv1d_width = config.conv1d_width self.conv_1d = nn.Conv1d( config.lru_width, config.lru_width, kernel_size=config.conv1d_width, groups=config.lru_width, padding=config.conv1d_width - 1, ) self.rg_lru = RecurrentGemmaRglru(config) self.act_fn = ACT2FN[config.hidden_activation] self.conv1d_state = None def forward( self, input_states: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: torch.Tensor, use_cache: bool = True, ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]: _, seq_len, _ = input_states.shape y_branch = self.linear_y(input_states) y_branch = self.act_fn(y_branch) x_branch = self.linear_x(input_states) x_branch = x_branch.transpose(1, 2) if use_cache: if cache_position.shape[0] != 1: # prefill self.conv1d_state = nn.functional.pad(x_branch, (self.conv1d_width - 
x_branch.shape[-1] - 1, 0)) x_branch = self.conv_1d(x_branch)[..., :seq_len] else: # decoding conv_state = torch.cat((self.conv1d_state, x_branch), -1) x_branch = torch.sum(conv_state * self.conv_1d.weight[:, 0, :], dim=-1) + self.conv_1d.bias x_branch = x_branch.unsqueeze(-1) self.conv1d_state = conv_state[:, :, 1:] else: x_branch = self.conv_1d(x_branch)[..., :seq_len] x_branch = self.rg_lru(x_branch.transpose(1, 2), position_ids) hidden_states = x_branch * y_branch hidden_states = self.linear_out(hidden_states) return hidden_states def _setup_cache(self, batch, device, dtype): # recurrent_states always computed in full precision self.rg_lru.recurrent_states = torch.zeros((batch, self.lru_width), device=device, dtype=torch.float32) self.conv1d_state = torch.zeros((batch, self.hidden_size, self.conv1d_width - 1), device=device, dtype=dtype) TEMPORAL_BLOCK_CLASSES = {"recurrent": RecurrentGemmaRecurrentBlock, "attention": RecurrentGemmaSdpaAttention} class RecurrentGemmaMlp(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size // 2 self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=True) self.act_fn = ACT2FN[config.hidden_activation] def forward(self, hidden_states): gate = self.act_fn(self.gate_proj(hidden_states)) return self.down_proj(gate * self.up_proj(hidden_states)) class RecurrentGemmaDecoderLayer(GradientCheckpointingLayer): """Griffin and Hawk's residual block.""" def __init__(self, config, layer_idx): super().__init__() self.temporal_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.temporal_block = TEMPORAL_BLOCK_CLASSES[config.layers_block_type[layer_idx]](config) self.channel_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.mlp_block = RecurrentGemmaMlp(config) def forward( self, activations: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]: raw_activations = activations inputs_normalized = self.temporal_pre_norm(raw_activations) # RMSNorm introduces slight slight differences hidden_states = self.temporal_block( inputs_normalized, position_ids, attention_mask, cache_position=cache_position, use_cache=use_cache ) residual = hidden_states + raw_activations hidden_states = self.channel_pre_norm(residual) hidden_states = self.mlp_block(hidden_states) hidden_states = hidden_states + residual return hidden_states @auto_docstring class RecurrentGemmaPreTrainedModel(PreTrainedModel): config: RecurrentGemmaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["RecurrentGemmaDecoderLayer"] _skip_keys_device_placement = ["cache"] _supports_flash_attn = False _supports_sdpa = False # we can't compare with eager for now def _init_weights(self, module): std = math.sqrt(self.config.w_init_variance_scale / self.config.conv1d_width) if isinstance(module, nn.Conv1d): torch.nn.init.normal_(module.weight, mean=0.0, std=std) torch.nn.init.zeros_(module.bias) elif isinstance(module, RecurrentGemmaSdpaAttention): torch.nn.init.normal_(module.q_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size)) 
torch.nn.init.normal_(module.k_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size)) torch.nn.init.normal_(module.v_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size)) std = math.sqrt(self.config.final_w_init_variance_scale / self.config.hidden_size) torch.nn.init.normal_(module.o_proj.weight, mean=0.0, std=std) elif isinstance(module, RecurrentGemmaRecurrentBlock): torch.nn.init.zeros_(module.linear_x.bias) torch.nn.init.normal_(module.linear_x.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size)) torch.nn.init.zeros_(module.linear_y.bias) torch.nn.init.normal_(module.linear_y.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size)) std = math.sqrt(self.config.final_w_init_variance_scale / self.config.lru_width) torch.nn.init.normal_(module.linear_out.weight, mean=0.0, std=std) torch.nn.init.zeros_(module.linear_out.bias) elif isinstance(module, RecurrentGemmaRglru): std = math.sqrt( self.config.w_init_variance_scale / (self.config.lru_width // self.config.num_attention_heads) ) torch.nn.init.normal_(module.input_gate_weight, mean=0.0, std=std) torch.nn.init.normal_(module.recurrent_gate_weight, mean=0.0, std=std) torch.nn.init.zeros_(module.input_gate_bias) torch.nn.init.zeros_(module.recurrent_gate_bias) module.recurrent_param.data.uniform_(0.9**2 + 1e-8, 0.999**2 + 1e-8) module.recurrent_param.data.log_().mul_(0.5) module.recurrent_param.data.neg_().exp_().sub_(1.0).log_() elif isinstance(module, nn.Linear): torch.nn.init.normal_(module.weight, mean=0.0, std=std) if getattr(module, "bias", None) is not None: torch.nn.init.zeros_(module.bias) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, RecurrentGemmaRMSNorm): module.weight.data.fill_(1.0) def _setup_cache(self, config, batch, device, dtype): layers = getattr(self, "model", self).layers for layer in layers: layer.temporal_block._setup_cache(batch, device, dtype) def reset_cache(self, batch, device, dtype): pass @auto_docstring class RecurrentGemmaModel(RecurrentGemmaPreTrainedModel): def __init__(self, config: RecurrentGemmaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [RecurrentGemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.final_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False self.register_buffer( "normalizer", torch.tensor(self.config.hidden_size**0.5, dtype=torch.bfloat16), persistent=False ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, cache_position: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds if use_cache and inputs_embeds.shape[1] != 1: # TODO let's maybe only call in the `generate`? self._setup_cache(self.config, hidden_states.shape[0], hidden_states.device, hidden_states.dtype) if cache_position is None: cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position) hidden_states = hidden_states * self.normalizer.type(hidden_states.dtype) all_hidden_states = () if output_hidden_states else None for i, residual_block in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) hidden_states = residual_block(hidden_states, position_ids, causal_mask, cache_position, use_cache) hidden_states = self.final_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=hidden_states, hidden_states=all_hidden_states, ) # Ignore copy def _update_causal_mask(self, attention_mask, input_tensor, cache_position): dtype, device = input_tensor.dtype, input_tensor.device min_dtype = torch.finfo(dtype).min sequence_length = input_tensor.shape[1] target_length = max(self.config.attention_window_size, sequence_length) diagonal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) causal_mask = diagonal if sequence_length != 1: causal_mask = torch.triu(diagonal, diagonal=-1) causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit if attention_mask.dim() == 2: # Crop the attention mask to the target length. attention_mask = attention_mask[:, -target_length:] mask_length = attention_mask.shape[-1] padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0) causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) if attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"]: # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
# Details: https://github.com/pytorch/pytorch/issues/110213 causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask # TODO: re-enable check: Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->RECURRENTGEMMA,Llama->RecurrentGemma,llama->gemma @auto_docstring class RecurrentGemmaForCausalLM(RecurrentGemmaPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): super().__init__(config) self.model = RecurrentGemmaModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @auto_docstring # Ignore copy def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, use_cache: Optional[bool] = None, **kwargs, ) -> Union[tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, RecurrentGemmaForCausalLM >>> model = RecurrentGemmaForCausalLM.from_pretrained("google/recurrentgemma-2b") >>> tokenizer = AutoTokenizer.from_pretrained("google/recurrentgemma-2b") >>> prompt = "What is your favorite condiment?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is your favorite condiment?" ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True outputs = self.model( input_ids=input_ids, position_ids=position_ids, cache_position=cache_position, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) # Soft-cap the logits TODO remove if always done. # if self.config.logits_soft_cap is not None: cap = self.config.logits_soft_cap logits = nn.functional.tanh(logits / cap) * cap loss = None if labels is not None: # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, ) __all__ = ["RecurrentGemmaForCausalLM", "RecurrentGemmaModel", "RecurrentGemmaPreTrainedModel"]
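# --- Illustrative sketch (appended annotation, not part of the library file above) ---
# RecurrentGemmaRglru._rnn_scan() runs the diagonal linear recurrence
# h_t = a_t * h_{t-1} + x_t over the sequence dimension, zeroing the decay wherever
# `reset` marks the start of a new document. The minimal scan below reproduces that
# recurrence on toy tensors; names and shapes are illustrative assumptions, and the
# gating and normalization that produce `a` and `x` in the real layer are omitted.
import torch


def linear_rnn_scan(x: torch.Tensor, a: torch.Tensor, reset: torch.Tensor) -> torch.Tensor:
    # x, a: (batch, seq_len, width); reset: (batch, seq_len, 1), boolean
    a = a * ~reset                      # drop the previous state at document boundaries
    state = torch.zeros_like(x[:, 0])
    out = torch.zeros_like(x)
    for t in range(x.shape[1]):
        state = a[:, t] * state + x[:, t]
        out[:, t] = state
    return out


if __name__ == "__main__":
    batch, seq_len, width = 2, 5, 3
    x = torch.randn(batch, seq_len, width)
    a = torch.full((batch, seq_len, width), 0.9)
    reset = torch.zeros(batch, seq_len, 1, dtype=torch.bool)
    reset[:, 0] = True                  # position 0 starts a new document
    print(linear_rnn_scan(x, a, reset).shape)   # torch.Size([2, 5, 3])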
transformers/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py/0
{ "file_path": "transformers/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py", "repo_id": "transformers", "token_count": 15536 }
528
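The `RecurrentGemmaForCausalLM.forward` pass above soft-caps the logits with a tanh before the loss is computed. A minimal sketch of that capping step, using an illustrative cap value rather than the one stored in a real checkpoint's config:

```python
import torch


def soft_cap_logits(logits: torch.Tensor, cap: float = 30.0) -> torch.Tensor:
    # Bound logits to (-cap, cap) while staying roughly linear near zero,
    # mirroring the `tanh(logits / cap) * cap` step in the forward pass above.
    return torch.tanh(logits / cap) * cap


raw = torch.tensor([-100.0, -10.0, 0.0, 10.0, 100.0])
print(soft_cap_logits(raw))  # every value now lies strictly inside (-30, 30)
```

Because tanh is monotonic, capping keeps extreme logits from dominating the softmax without changing the ordering of the scores.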
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert RemBERT checkpoint.""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path): # Initialise PyTorch model config = RemBertConfig.from_json_file(bert_config_file) print(f"Building PyTorch model from configuration: {str(config)}") model = RemBertModel(config) # Load weights from tf checkpoint load_tf_weights_in_rembert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--rembert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained RemBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
transformers/src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/rembert/convert_rembert_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 775 }
529
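The RemBERT conversion script above is normally run from the command line, but the same entry point can be called directly from Python. A sketch assuming a source checkout of `transformers` with TensorFlow installed; all three paths are placeholders:

```python
from transformers.models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
    convert_rembert_tf_checkpoint_to_pytorch,
)

# Placeholder paths for illustration only.
convert_rembert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/checkpoints/rembert/model.ckpt",
    bert_config_file="/checkpoints/rembert/config.json",
    pytorch_dump_path="/checkpoints/rembert/pytorch_model.bin",
)
```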
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax RoFormer model.""" from typing import Callable, Optional import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_roformer import RoFormerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "junnyu/roformer_chinese_base" _CONFIG_FOR_DOC = "RoFormerConfig" ROFORMER_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`RoFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. 
""" ROFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`numpy.ndarray` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.marian.modeling_flax_marian.create_sinusoidal_positions def create_sinusoidal_positions(n_pos, dim): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) sentinel = dim // 2 + dim % 2 out = np.zeros_like(position_enc) out[:, 0:sentinel] = np.sin(position_enc[:, 0::2]) out[:, sentinel:] = np.cos(position_enc[:, 1::2]) return jnp.array(out) class FlaxRoFormerEmbeddings(nn.Module): """Construct the embeddings from word and token_type embeddings.""" config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxRoFormerSelfAttention(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, 
kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.rotary_value = self.config.rotary_value def __call__( self, hidden_states, attention_mask, sinusoidal_pos, layer_head_mask, deterministic=True, output_attentions: bool = False, ): head_dim = self.config.hidden_size // self.config.num_attention_heads query_states = self.query(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) value_states = self.value(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) key_states = self.key(hidden_states).reshape( hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim) ) if sinusoidal_pos is not None: if self.rotary_value: query_states, key_states, value_states = self.apply_rotary_position_embeddings( sinusoidal_pos, query_states, key_states, value_states ) else: query_states, key_states = self.apply_rotary_position_embeddings( sinusoidal_pos, query_states, key_states ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs @staticmethod def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None): sin, cos = jnp.split(sinusoidal_pos, 2, axis=-1) sin_pos = jnp.stack([sin, sin], axis=-1).reshape(sinusoidal_pos.shape) cos_pos = jnp.stack([cos, cos], axis=-1).reshape(sinusoidal_pos.shape) def rotate_layer(layer, sin_pos, cos_pos): rotate_half_layer = jnp.stack([-layer[..., 1::2], layer[..., ::2]], axis=-1).reshape(layer.shape) rotary_matrix_cos = jnp.einsum("bslh,...sh->bslh", layer, cos_pos) rotary_matrix_sin = jnp.einsum("bslh,...sh->bslh", rotate_half_layer, sin_pos) return rotary_matrix_cos + rotary_matrix_sin query_layer = rotate_layer(query_layer, sin_pos, cos_pos) key_layer = rotate_layer(key_layer, sin_pos, cos_pos) if value_layer is not None: value_layer = rotate_layer(value_layer, sin_pos, cos_pos) return query_layer, key_layer, value_layer return query_layer, key_layer # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->RoFormer class FlaxRoFormerSelfOutput(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the 
dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class FlaxRoFormerAttention(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxRoFormerSelfAttention(self.config, dtype=self.dtype) self.output = FlaxRoFormerSelfOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, sinusoidal_pos, layer_head_mask, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states, attention_mask, sinusoidal_pos, layer_head_mask=layer_head_mask, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->RoFormer class FlaxRoFormerIntermediate(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->RoFormer class FlaxRoFormerOutput(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states + attention_output) return hidden_states class FlaxRoFormerLayer(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxRoFormerAttention(self.config, dtype=self.dtype) self.intermediate = FlaxRoFormerIntermediate(self.config, dtype=self.dtype) self.output = FlaxRoFormerOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, sinusiodal_pos, layer_head_mask, deterministic: bool = True, output_attentions: bool = False, ): attention_outputs = self.attention( 
hidden_states, attention_mask, sinusiodal_pos, layer_head_mask=layer_head_mask, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) return outputs class FlaxRoFormerLayerCollection(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layers = [ FlaxRoFormerLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, sinusoidal_pos, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, sinusoidal_pos, layer_head_mask=head_mask[i] if head_mask is not None else None, deterministic=deterministic, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states,) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxRoFormerEncoder(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embed_positions = create_sinusoidal_positions( self.config.max_position_embeddings, self.config.hidden_size // self.config.num_attention_heads ) self.layer = FlaxRoFormerLayerCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): sinusoidal_pos = self.embed_positions[: hidden_states.shape[1], :] return self.layer( hidden_states, attention_mask, sinusoidal_pos, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPredictionHeadTransform with Bert->RoFormer class FlaxRoFormerPredictionHeadTransform(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype) self.activation = ACT2FN[self.config.hidden_act] self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return self.LayerNorm(hidden_states) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLMPredictionHead with Bert->RoFormer class FlaxRoFormerLMPredictionHead(nn.Module): config: 
RoFormerConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.transform = FlaxRoFormerPredictionHeadTransform(self.config, dtype=self.dtype) self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.transform(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOnlyMLMHead with Bert->RoFormer class FlaxRoFormerOnlyMLMHead(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.predictions = FlaxRoFormerLMPredictionHead(self.config, dtype=self.dtype) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding) return hidden_states class FlaxRoFormerClassificationHead(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states class FlaxRoFormerPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = RoFormerConfig base_model_prefix = "roformer" module_class: nn.Module = None def __init__( self, config: RoFormerConfig, input_shape: tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_ids, attention_mask, token_type_ids, head_mask, return_dict=False )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, head_mask=None, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(head_mask, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) class FlaxRoFormerModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.embeddings = FlaxRoFormerEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRoFormerEncoder(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): hidden_states = self.embeddings(input_ids, token_type_ids, attention_mask, deterministic=deterministic) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if not return_dict: return (hidden_states,) + outputs[1:] 
return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( "The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.", ROFORMER_START_DOCSTRING, ) class FlaxRoFormerModel(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerModule append_call_sample_docstring(FlaxRoFormerModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) class FlaxRoFormerForMaskedLMModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype) self.cls = FlaxRoFormerOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roformer( input_ids, attention_mask, token_type_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roformer.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""RoFormer Model with a `language modeling` head on top.""", ROFORMER_START_DOCSTRING) class FlaxRoFormerForMaskedLM(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerForMaskedLMModule append_call_sample_docstring( FlaxRoFormerForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC, mask="<mask>", ) class FlaxRoFormerForSequenceClassificationModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype) self.classifier = FlaxRoFormerClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roformer( input_ids, attention_mask, token_type_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", ROFORMER_START_DOCSTRING, ) class FlaxRoFormerForSequenceClassification(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerForSequenceClassificationModule append_call_sample_docstring( FlaxRoFormerForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) class FlaxRoFormerForMultipleChoiceModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) # Model outputs = self.roformer( input_ids, attention_mask, token_type_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Equivalent to sequence_summary call in the PyTorch implementation hidden_states = outputs[0] pooled_output = hidden_states[:, -1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", ROFORMER_START_DOCSTRING, ) class FlaxRoFormerForMultipleChoice(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerForMultipleChoiceModule overwrite_call_docstring( FlaxRoFormerForMultipleChoice, ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) append_call_sample_docstring( FlaxRoFormerForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC, ) class FlaxRoFormerForTokenClassificationModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roformer( input_ids, attention_mask, token_type_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, ROFORMER_START_DOCSTRING, ) class FlaxRoFormerForTokenClassification(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerForTokenClassificationModule append_call_sample_docstring( FlaxRoFormerForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC, ) class FlaxRoFormerForQuestionAnsweringModule(nn.Module): config: RoFormerConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.roformer = FlaxRoFormerModule(config=self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roformer( input_ids, attention_mask, token_type_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", ROFORMER_START_DOCSTRING, ) class FlaxRoFormerForQuestionAnswering(FlaxRoFormerPreTrainedModel): module_class = FlaxRoFormerForQuestionAnsweringModule append_call_sample_docstring( FlaxRoFormerForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) __all__ = [ "FlaxRoFormerForMaskedLM", "FlaxRoFormerForMultipleChoice", "FlaxRoFormerForQuestionAnswering", "FlaxRoFormerForSequenceClassification", "FlaxRoFormerForTokenClassification", "FlaxRoFormerModel", "FlaxRoFormerPreTrainedModel", ]
transformers/src/transformers/models/roformer/modeling_flax_roformer.py/0
{ "file_path": "transformers/src/transformers/models/roformer/modeling_flax_roformer.py", "repo_id": "transformers", "token_count": 17177 }
530
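The Flax RoFormer attention above applies rotary position embeddings through `create_sinusoidal_positions` and `apply_rotary_position_embeddings`. A minimal single-head NumPy sketch of the same rotate-half update, with illustrative shapes and the usual 10000 base:

```python
import numpy as np


def rotate_half(x: np.ndarray) -> np.ndarray:
    # Interleave (-x2, x1), (-x4, x3), ... pairs, matching the stack/reshape trick above.
    return np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)


def toy_rotary(x: np.ndarray, base: float = 10000.0) -> np.ndarray:
    # x has shape (seq_len, head_dim) with an even head_dim.
    seq_len, dim = x.shape
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)      # (dim // 2,)
    angles = np.arange(seq_len)[:, None] * inv_freq[None, :]   # (seq_len, dim // 2)
    sin = np.repeat(np.sin(angles), 2, axis=-1)                # duplicate each angle for its pair
    cos = np.repeat(np.cos(angles), 2, axis=-1)
    return x * cos + rotate_half(x) * sin                      # 2-D rotation of each (x_{2i}, x_{2i+1}) pair


query = np.random.randn(4, 8)
print(toy_rotary(query).shape)  # (4, 8)
```

Each adjacent pair of channels is rotated by a position-dependent angle, which is what lets the dot product between query and key depend only on their relative offset.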
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_rt_detr_v2.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Baidu Inc and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING logger = logging.get_logger(__name__) class RTDetrV2Config(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RTDetrV2Model`]. It is used to instantiate a RT-DETR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RT-DETR architecture. e.g. [PekingU/rtdetr_r18vd](https://huggingface.co/PekingU/rtdetr_r18vd) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: initializer_range (`float`, *optional*, defaults to 0.01): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_bias_prior_prob (`float`, *optional*): The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`. If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. batch_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the batch normalization layers. backbone_config (`Dict`, *optional*, defaults to `RTDetrV2ResNetConfig()`): The configuration of the backbone model. backbone (`str`, *optional*): Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. use_pretrained_backbone (`bool`, *optional*, defaults to `False`): Whether to use pretrained weights for the backbone. use_timm_backbone (`bool`, *optional*, defaults to `False`): Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. 
backbone_kwargs (`dict`, *optional*): Keyword arguments to be passed to AutoBackbone when loading from a checkpoint e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): Multi level features input for encoder. feat_strides (`list[int]`, *optional*, defaults to `[8, 16, 32]`): Strides used in each feature map. encoder_layers (`int`, *optional*, defaults to 1): Total of layers to be used by the encoder. encoder_ffn_dim (`int`, *optional*, defaults to 1024): Dimension of the "intermediate" (often named feed-forward) layer in decoder. encoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. dropout (`float`, *optional*, defaults to 0.0): The ratio for all dropout layers. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. encode_proj_layers (`list[int]`, *optional*, defaults to `[2]`): Indexes of the projected layers to be used in the encoder. positional_encoding_temperature (`int`, *optional*, defaults to 10000): The temperature parameter used to create the positional encodings. encoder_activation_function (`str`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. activation_function (`str`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the general layer. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. eval_size (`tuple[int, int]`, *optional*): Height and width used to compute the effective height and width of the position embeddings after taking into account the stride. normalize_before (`bool`, *optional*, defaults to `False`): Determine whether to apply layer normalization in the transformer encoder layer before self-attention and feed-forward modules. hidden_expansion (`float`, *optional*, defaults to 1.0): Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer. d_model (`int`, *optional*, defaults to 256): Dimension of the layers exclude hybrid encoder. num_queries (`int`, *optional*, defaults to 300): Number of object queries. decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`): Multi level features dimension for decoder decoder_ffn_dim (`int`, *optional*, defaults to 1024): Dimension of the "intermediate" (often named feed-forward) layer in decoder. num_feature_levels (`int`, *optional*, defaults to 3): The number of input feature levels. decoder_n_points (`int`, *optional*, defaults to 4): The number of sampled keys in each feature level for each attention head in the decoder. decoder_layers (`int`, *optional*, defaults to 6): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer decoder. decoder_activation_function (`str`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the decoder. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
num_denoising (`int`, *optional*, defaults to 100): The total number of denoising tasks or queries to be used for contrastive denoising. label_noise_ratio (`float`, *optional*, defaults to 0.5): The fraction of denoising labels to which random noise should be added. box_noise_scale (`float`, *optional*, defaults to 1.0): Scale or magnitude of noise to be added to the bounding boxes. learn_initial_query (`bool`, *optional*, defaults to `False`): Indicates whether the initial query embeddings for the decoder should be learned during training anchor_image_size (`tuple[int, int]`, *optional*): Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied. with_box_refine (`bool`, *optional*, defaults to `True`): Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes based on the predictions from the previous layer. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether the architecture has an encoder decoder structure. matcher_alpha (`float`, *optional*, defaults to 0.25): Parameter alpha used by the Hungarian Matcher. matcher_gamma (`float`, *optional*, defaults to 2.0): Parameter gamma used by the Hungarian Matcher. matcher_class_cost (`float`, *optional*, defaults to 2.0): The relative weight of the class loss used by the Hungarian Matcher. matcher_bbox_cost (`float`, *optional*, defaults to 5.0): The relative weight of the bounding box loss used by the Hungarian Matcher. matcher_giou_cost (`float`, *optional*, defaults to 2.0): The relative weight of the giou loss of used by the Hungarian Matcher. use_focal_loss (`bool`, *optional*, defaults to `True`): Parameter informing if focal loss should be used. auxiliary_loss (`bool`, *optional*, defaults to `True`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. focal_loss_alpha (`float`, *optional*, defaults to 0.75): Parameter alpha used to compute the focal loss. focal_loss_gamma (`float`, *optional*, defaults to 2.0): Parameter gamma used to compute the focal loss. weight_loss_vfl (`float`, *optional*, defaults to 1.0): Relative weight of the varifocal loss in the object detection loss. weight_loss_bbox (`float`, *optional*, defaults to 5.0): Relative weight of the L1 bounding box loss in the object detection loss. weight_loss_giou (`float`, *optional*, defaults to 2.0): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.0001): Relative classification weight of the 'no-object' class in the object detection loss. decoder_n_levels (`int`, *optional*, defaults to 3): The number of feature levels used by the decoder. decoder_offset_scale (`float`, *optional*, defaults to 0.5): Scaling factor applied to the attention offsets in the decoder. decoder_method (`str`, *optional*, defaults to `"default"`): The method to use for the decoder: `"default"` or `"discrete"`. 
Examples: ```python >>> from transformers import RTDetrV2Config, RTDetrV2Model >>> # Initializing a RT-DETR configuration >>> configuration = RTDetrV2Config() >>> # Initializing a model (with random weights) from the configuration >>> model = RTDetrV2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "rt_detr_v2" layer_types = ["basic", "bottleneck"] attribute_map = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self, initializer_range=0.01, initializer_bias_prior_prob=None, layer_norm_eps=1e-5, batch_norm_eps=1e-5, # backbone backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, freeze_backbone_batch_norms=True, backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], feat_strides=[8, 16, 32], encoder_layers=1, encoder_ffn_dim=1024, encoder_attention_heads=8, dropout=0.0, activation_dropout=0.0, encode_proj_layers=[2], positional_encoding_temperature=10000, encoder_activation_function="gelu", activation_function="silu", eval_size=None, normalize_before=False, hidden_expansion=1.0, # decoder RTDetrV2Transformer d_model=256, num_queries=300, decoder_in_channels=[256, 256, 256], decoder_ffn_dim=1024, num_feature_levels=3, decoder_n_points=4, decoder_layers=6, decoder_attention_heads=8, decoder_activation_function="relu", attention_dropout=0.0, num_denoising=100, label_noise_ratio=0.5, box_noise_scale=1.0, learn_initial_query=False, anchor_image_size=None, with_box_refine=True, is_encoder_decoder=True, # Loss matcher_alpha=0.25, matcher_gamma=2.0, matcher_class_cost=2.0, matcher_bbox_cost=5.0, matcher_giou_cost=2.0, use_focal_loss=True, auxiliary_loss=True, focal_loss_alpha=0.75, focal_loss_gamma=2.0, weight_loss_vfl=1.0, weight_loss_bbox=5.0, weight_loss_giou=2.0, eos_coefficient=1e-4, decoder_n_levels=3, # default value decoder_offset_scale=0.5, # default value decoder_method="default", **kwargs, ): super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) self.initializer_range = initializer_range self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps # backbone if backbone_config is None and backbone is None: logger.info( "`backbone_config` and `backbone` are `None`. Initializing the config with the default `RTDetrV2-ResNet` backbone." 
) backbone_model_type = "rt_detr_resnet" config_class = CONFIG_MAPPING[backbone_model_type] # this will map it to RTDetrResNetConfig # note: we can instead create RTDetrV2ResNetConfig but it will be exactly the same as V1 # and we would need to create RTDetrV2ResNetModel backbone_config = config_class( num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, downsample_in_bottleneck=False, out_features=None, out_indices=[2, 3, 4], ) elif isinstance(backbone_config, dict): backbone_model_type = backbone_config.pop("model_type") config_class = CONFIG_MAPPING[backbone_model_type] backbone_config = config_class.from_dict(backbone_config) verify_backbone_config_arguments( use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs, ) self.backbone_config = backbone_config self.backbone = backbone self.use_pretrained_backbone = use_pretrained_backbone self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels self.feat_strides = feat_strides self.encoder_ffn_dim = encoder_ffn_dim self.dropout = dropout self.activation_dropout = activation_dropout self.encode_proj_layers = encode_proj_layers self.encoder_layers = encoder_layers self.positional_encoding_temperature = positional_encoding_temperature self.eval_size = eval_size self.normalize_before = normalize_before self.encoder_activation_function = encoder_activation_function self.activation_function = activation_function self.hidden_expansion = hidden_expansion self.num_queries = num_queries self.decoder_ffn_dim = decoder_ffn_dim self.decoder_in_channels = decoder_in_channels self.num_feature_levels = num_feature_levels self.decoder_n_points = decoder_n_points self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_activation_function = decoder_activation_function self.attention_dropout = attention_dropout self.num_denoising = num_denoising self.label_noise_ratio = label_noise_ratio self.box_noise_scale = box_noise_scale self.learn_initial_query = learn_initial_query self.anchor_image_size = anchor_image_size self.auxiliary_loss = auxiliary_loss self.with_box_refine = with_box_refine # Loss self.matcher_alpha = matcher_alpha self.matcher_gamma = matcher_gamma self.matcher_class_cost = matcher_class_cost self.matcher_bbox_cost = matcher_bbox_cost self.matcher_giou_cost = matcher_giou_cost self.use_focal_loss = use_focal_loss self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.weight_loss_vfl = weight_loss_vfl self.weight_loss_bbox = weight_loss_bbox self.weight_loss_giou = weight_loss_giou self.eos_coefficient = eos_coefficient if not hasattr(self, "d_model"): self.d_model = d_model if not hasattr(self, "encoder_attention_heads"): self.encoder_attention_heads = encoder_attention_heads # add the new attributes with the given values or defaults self.decoder_n_levels = decoder_n_levels self.decoder_offset_scale = decoder_offset_scale self.decoder_method = decoder_method @property def sub_configs(self): return ( {"backbone_config": type(self.backbone_config)} if getattr(self, "backbone_config", None) is not None else {} ) @classmethod def from_backbone_configs(cls, 
backbone_config: PretrainedConfig, **kwargs): """Instantiate a [`RTDetrV2Config`] (or a derived class) from a pre-trained backbone model configuration and DETR model configuration. Args: backbone_config ([`PretrainedConfig`]): The backbone configuration. Returns: [`RTDetrV2Config`]: An instance of a configuration object """ return cls( backbone_config=backbone_config, **kwargs, ) __all__ = ["RTDetrV2Config"]
transformers/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py/0
{ "file_path": "transformers/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py", "repo_id": "transformers", "token_count": 8234 }
531
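The `from_backbone_configs` classmethod at the end of the config above builds an `RTDetrV2Config` around an explicit backbone configuration. A short sketch, assuming `RTDetrResNetConfig` is importable from the top-level `transformers` namespace as in current releases; the backbone settings and the `"discrete"` decoder method are illustrative choices, not recommended defaults:

```python
from transformers import RTDetrResNetConfig, RTDetrV2Config

# Assemble a detector config from an explicit backbone config via the classmethod above.
backbone_config = RTDetrResNetConfig(out_indices=[2, 3, 4])
config = RTDetrV2Config.from_backbone_configs(backbone_config, decoder_method="discrete")

print(type(config.backbone_config).__name__)  # RTDetrResNetConfig
print(config.decoder_method)                  # discrete
```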
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert SAM-HQ checkpoints from the original repository. URL: https://github.com/SysCV/sam-hq """ import argparse import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SamHQConfig, SamHQModel, SamHQProcessor, SamHQVisionConfig, SamImageProcessor def get_config(model_name): if "sam_hq_vit_b" in model_name: vision_config = SamHQVisionConfig() vit_dim = 768 # Base model dimension elif "sam_hq_vit_l" in model_name: vision_config = SamHQVisionConfig( hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) vit_dim = 1024 # Large model dimension elif "sam_hq_vit_h" in model_name: vision_config = SamHQVisionConfig( hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) vit_dim = 1280 # Huge model dimension # Create mask decoder config with appropriate vit_dim mask_decoder_config = {"vit_dim": vit_dim} config = SamHQConfig( vision_config=vision_config, mask_decoder_config=mask_decoder_config, ) return config KEYS_TO_MODIFY_MAPPING = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", # HQ-specific mappings "mask_decoder.hf_token": "mask_decoder.hq_token", "mask_decoder.compress_vit_feat.0": "mask_decoder.compress_vit_conv1", "mask_decoder.compress_vit_feat.1": "mask_decoder.compress_vit_norm", "mask_decoder.compress_vit_feat.3": "mask_decoder.compress_vit_conv2", "mask_decoder.embedding_encoder.0": "mask_decoder.encoder_conv1", "mask_decoder.embedding_encoder.1": "mask_decoder.encoder_norm", "mask_decoder.embedding_encoder.3": "mask_decoder.encoder_conv2", "mask_decoder.embedding_maskfeature.0": "mask_decoder.mask_conv1", "mask_decoder.embedding_maskfeature.1": "mask_decoder.mask_norm", "mask_decoder.embedding_maskfeature.3": "mask_decoder.mask_conv2", "mask_decoder.hf_mlp": "mask_decoder.hq_mask_mlp", # Add patterns for the output_hypernetworks_mlps and hq_mask_mlp "output_hypernetworks_mlps.0.layers.0": 
"output_hypernetworks_mlps.0.proj_in", "output_hypernetworks_mlps.0.layers.1": "output_hypernetworks_mlps.0.layers.0", "output_hypernetworks_mlps.0.layers.2": "output_hypernetworks_mlps.0.proj_out", "output_hypernetworks_mlps.1.layers.0": "output_hypernetworks_mlps.1.proj_in", "output_hypernetworks_mlps.1.layers.1": "output_hypernetworks_mlps.1.layers.0", "output_hypernetworks_mlps.1.layers.2": "output_hypernetworks_mlps.1.proj_out", "output_hypernetworks_mlps.2.layers.0": "output_hypernetworks_mlps.2.proj_in", "output_hypernetworks_mlps.2.layers.1": "output_hypernetworks_mlps.2.layers.0", "output_hypernetworks_mlps.2.layers.2": "output_hypernetworks_mlps.2.proj_out", "output_hypernetworks_mlps.3.layers.0": "output_hypernetworks_mlps.3.proj_in", "output_hypernetworks_mlps.3.layers.1": "output_hypernetworks_mlps.3.layers.0", "output_hypernetworks_mlps.3.layers.2": "output_hypernetworks_mlps.3.proj_out", "hq_mask_mlp.layers.0": "hq_mask_mlp.proj_in", "hq_mask_mlp.layers.1": "hq_mask_mlp.layers.0", "hq_mask_mlp.layers.2": "hq_mask_mlp.proj_out", } def replace_keys(state_dict): model_state_dict = {} state_dict.pop("pixel_mean", None) state_dict.pop("pixel_std", None) # Process each key in the state dict for key, value in state_dict.items(): new_key = key # Apply static mappings from KEYS_TO_MODIFY_MAPPING for key_to_modify, replacement in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in new_key: new_key = new_key.replace(key_to_modify, replacement) model_state_dict[new_key] = value # Add mapping for shared embedding for positional embedding if "prompt_encoder.shared_embedding.positional_embedding" in model_state_dict: model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] # Special handling for IOU prediction head keys # Check if we're missing the expected keys and have the converted ones instead if ( "mask_decoder.iou_prediction_head.layers.0.weight" not in model_state_dict and "mask_decoder.iou_prediction_head.proj_in.weight" in model_state_dict ): # Copy the converted key back to the expected format model_state_dict["mask_decoder.iou_prediction_head.layers.0.weight"] = model_state_dict[ "mask_decoder.iou_prediction_head.proj_in.weight" ] model_state_dict["mask_decoder.iou_prediction_head.layers.0.bias"] = model_state_dict[ "mask_decoder.iou_prediction_head.proj_in.bias" ] return model_state_dict def convert_sam_hq_checkpoint(model_name, checkpoint_path, pytorch_dump_folder, push_to_hub, hub_path): config = get_config(model_name) state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True) state_dict = replace_keys(state_dict) image_processor = SamImageProcessor() processor = SamHQProcessor(image_processor=image_processor) hf_model = SamHQModel(config) hf_model.eval() device = "cuda" if torch.cuda.is_available() else "cpu" hf_model.load_state_dict(state_dict) hf_model = hf_model.to(device) # Test the model with a sample image img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") input_points = [[[500, 375]]] input_labels = [[1]] # Basic test without prompts inputs = processor(images=np.array(raw_image), return_tensors="pt").to(device) with torch.no_grad(): hf_model(**inputs) if model_name == "sam_hq_vit_b": inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): 
hf_model(**inputs) elif model_name == "sam_hq_vit_h": inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): hf_model(**inputs) input_boxes = [[[75.0, 275.0, 1725.0, 850.0]]] inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to(device) with torch.no_grad(): hf_model(**inputs) input_points = [[[400, 650], [800, 650]]] input_labels = [[1, 1]] inputs = processor( images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt" ).to(device) with torch.no_grad(): hf_model(**inputs) if pytorch_dump_folder is not None: processor.save_pretrained(pytorch_dump_folder) hf_model.save_pretrained(pytorch_dump_folder) if push_to_hub: repo_id = f"{hub_path}/{model_name}" processor.push_to_hub(repo_id) hf_model.push_to_hub(repo_id) if __name__ == "__main__": parser = argparse.ArgumentParser() choices = ["sam_hq_vit_b", "sam_hq_vit_h", "sam_hq_vit_l"] parser.add_argument( "--model_name", choices=choices, type=str, required=True, help="Name of the SAM-HQ model to convert", ) parser.add_argument( "--checkpoint_path", type=str, required=False, help="Path to the SAM-HQ checkpoint (.pth file)", ) parser.add_argument( "--pytorch_dump_folder_path", type=str, default=None, help="Path to save the converted model", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the converted model to the hub", ) parser.add_argument( "--hub_path", type=str, default="sushmanth", help="Hugging Face Hub path where the model will be uploaded", ) args = parser.parse_args() checkpoint_path = args.checkpoint_path if checkpoint_path is None: checkpoint_path = hf_hub_download("lkeab/hq-sam", f"{args.model_name}.pth") convert_sam_hq_checkpoint( args.model_name, checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.hub_path, )
transformers/src/transformers/models/sam_hq/convert_samhq_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/sam_hq/convert_samhq_to_hf.py", "repo_id": "transformers", "token_count": 4482 }
532
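A minimal usage sketch (not part of the conversion script above): once a checkpoint has been converted and written out with `--pytorch_dump_folder_path`, it can be reloaded and prompted the same way the script's sanity check does. The local folder name below is an assumption for illustration; the image URL and point prompt are taken from the script itself.

import numpy as np
import requests
import torch
from PIL import Image

from transformers import SamHQModel, SamHQProcessor

# Hypothetical local folder produced by --pytorch_dump_folder_path
dump_folder = "./sam_hq_vit_b_converted"
model = SamHQModel.from_pretrained(dump_folder)
processor = SamHQProcessor.from_pretrained(dump_folder)

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

# Same prompt format as the conversion script: a single foreground point.
inputs = processor(
    images=np.array(raw_image), input_points=[[[500, 375]]], input_labels=[[1]], return_tensors="pt"
)
with torch.no_grad():
    outputs = model(**inputs)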
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for SegGPT.""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, logging, requires_backends if is_torch_available(): import torch logger = logging.get_logger(__name__) # See https://huggingface.co/papers/2212.02499 at 3.1 Redefining Output Spaces as "Images" - Semantic Segmentation from PAINTER paper # Taken from https://github.com/Abdullah-Meda/Painter/blob/main/Painter/data/coco_semseg/gen_color_coco_panoptic_segm.py#L31 def build_palette(num_labels: int) -> list[tuple[int, int]]: base = int(num_labels ** (1 / 3)) + 1 margin = 256 // base # we assume that class_idx 0 is the background which is mapped to black color_list = [(0, 0, 0)] for location in range(num_labels): num_seq_r = location // base**2 num_seq_g = (location % base**2) // base num_seq_b = location % base R = 255 - num_seq_r * margin G = 255 - num_seq_g * margin B = 255 - num_seq_b * margin color_list.append((R, G, B)) return color_list def mask_to_rgb( mask: np.ndarray, palette: Optional[list[tuple[int, int]]] = None, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: data_format = data_format if data_format is not None else ChannelDimension.FIRST if palette is not None: height, width = mask.shape rgb_mask = np.zeros((3, height, width), dtype=np.uint8) classes_in_mask = np.unique(mask) for class_idx in classes_in_mask: rgb_value = palette[class_idx] class_mask = (mask == class_idx).astype(np.uint8) class_mask = np.expand_dims(class_mask, axis=-1) class_rgb_mask = class_mask * np.array(rgb_value) class_rgb_mask = np.moveaxis(class_rgb_mask, -1, 0) rgb_mask += class_rgb_mask.astype(np.uint8) rgb_mask = np.clip(rgb_mask, 0, 255).astype(np.uint8) else: rgb_mask = np.repeat(mask[None, ...], 3, axis=0) return to_channel_dimension_format(rgb_mask, data_format) class SegGptImageProcessor(BaseImageProcessor): r""" Constructs a SegGpt image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `(size["height"], size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`): Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. 
Can be overridden by the `resample` parameter in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the prompt mask to RGB format. Can be overridden by the `do_convert_rgb` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Optional[dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 448, "width": 448} size = get_size_dict(size) self.do_resize = do_resize self.do_rescale = do_rescale self.do_normalize = do_normalize self.size = size self.resample = resample self.rescale_factor = rescale_factor self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_convert_rgb = do_convert_rgb def get_palette(self, num_labels: int) -> list[tuple[int, int]]: """Build a palette to map the prompt mask from a single channel to a 3 channel RGB. Args: num_labels (`int`): Number of classes in the segmentation task (excluding the background). Returns: `list[tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB. """ return build_palette(num_labels) def mask_to_rgb( self, image: np.ndarray, palette: Optional[list[tuple[int, int]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Converts a segmentation map to RGB format. Args: image (`np.ndarray`): Segmentation map with dimensions (height, width) where pixel values represent the class index. palette (`list[tuple[int, int]]`, *optional*, defaults to `None`): Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel dimension. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The mask in RGB format. """ return mask_to_rgb(image, palette=palette, data_format=data_format) # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC def resize( self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])`. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. Returns: `np.ndarray`: The resized image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") output_size = (size["height"], size["width"]) return resize( image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def _preprocess_step( self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, do_convert_rgb: Optional[bool] = None, num_labels: Optional[int] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. 
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format. num_labels: (`int`, *optional*): Number of classes in the segmentation task (excluding the background). If specified, a palette will be built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed through as is if it is already in RGB format or being duplicated across the channel dimension. 
""" do_resize = do_resize if do_resize is not None else self.do_resize do_rescale = do_rescale if do_rescale is not None else self.do_rescale do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb resample = resample if resample is not None else self.resample rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size_dict = get_size_dict(size) # If segmentation map is passed we expect 2D images images = make_list_of_images(images, expected_ndims=2 if do_convert_rgb else 3) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None and not do_convert_rgb: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) if do_convert_rgb: palette = self.get_palette(num_labels) if num_labels is not None else None # Since this is the input for the next transformations its format should be the same as the input_data_format images = [ self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images ] input_data_format = ChannelDimension.FIRST if do_resize: images = [ self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images ] if do_rescale: images = [ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images ] if do_normalize: images = [ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images ] images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images ] return images def preprocess( self, images: Optional[ImageInput] = None, prompt_images: Optional[ImageInput] = None, prompt_masks: Optional[ImageInput] = None, do_resize: Optional[bool] = None, size: Optional[dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, do_convert_rgb: Optional[bool] = None, num_labels: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. prompt_images (`ImageInput`): Prompt image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. prompt_masks (`ImageInput`): Prompt mask from prompt image to _preprocess that specify prompt_masks value in the preprocessed output. Can either be in the format of segmentation maps (no channels) or RGB images. If in the format of RGB images, `do_convert_rgb` should be set to `False`. If in the format of segmentation maps, `num_labels` specifying `num_labels` is recommended to build a palette to map the prompt mask from a single channel to a 3 channel RGB. If `num_labels` is not specified, the prompt mask will be duplicated across the channel dimension. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`): `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has an effect if `do_resize` is set to `True`. Doesn't apply to prompt mask as it is resized using nearest. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. 
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format. num_labels: (`int`, *optional*): Number of classes in the segmentation task (excluding the background). If specified, a palette will be built, assuming that class_idx 0 is the background, to map the prompt mask from a plain segmentation map with no channels to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed through as is if it is already in RGB format (if `do_convert_rgb` is false) or being duplicated across the channel dimension. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" if all(v is None for v in [images, prompt_images, prompt_masks]): raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.") data = {} if images is not None: images = self._preprocess_step( images, is_mask=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_convert_rgb=False, data_format=data_format, input_data_format=input_data_format, **kwargs, ) data["pixel_values"] = images if prompt_images is not None: prompt_images = self._preprocess_step( prompt_images, is_mask=False, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_convert_rgb=False, data_format=data_format, input_data_format=input_data_format, **kwargs, ) data["prompt_pixel_values"] = prompt_images if prompt_masks is not None: prompt_masks = self._preprocess_step( prompt_masks, do_resize=do_resize, size=size, resample=PILImageResampling.NEAREST, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_convert_rgb=do_convert_rgb, num_labels=num_labels, data_format=data_format, input_data_format=input_data_format, **kwargs, ) data["prompt_masks"] = prompt_masks return BatchFeature(data=data, tensor_type=return_tensors) def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None, num_labels: Optional[int] = None ): """ Converts the output of [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports PyTorch. Args: outputs ([`SegGptImageSegmentationOutput`]): Raw outputs of the model. target_sizes (`list[tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. num_labels (`int`, *optional*): Number of classes in the segmentation task (excluding the background). If specified, a palette will be built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class indices. This value should be the same used when preprocessing inputs. Returns: semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" requires_backends(self, ["torch"]) # batch_size x num_channels x 2*height x width masks = outputs.pred_masks # Predicted mask and prompt are concatenated in the height dimension # batch_size x num_channels x height x width masks = masks[:, :, masks.shape[2] // 2 :, :] # To unnormalize we need to permute to channel last # batch_size x height x width x num_channels std = torch.tensor(self.image_std).to(masks.device) mean = torch.tensor(self.image_mean).to(masks.device) masks = masks.permute(0, 2, 3, 1) * std + mean # batch_size x num_channels x height x width masks = masks.permute(0, 3, 1, 2) # Clip to match with palette if specified masks = torch.clip(masks * 255, 0, 255) semantic_segmentation = [] palette_tensor = None palette = self.get_palette(num_labels) if num_labels is not None else None if palette is not None: palette_tensor = torch.tensor(palette).to(device=masks.device, dtype=torch.float) _, num_channels, _, _ = masks.shape palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels) for idx, mask in enumerate(masks): if target_sizes is not None: mask = torch.nn.functional.interpolate( mask.unsqueeze(0), size=target_sizes[idx], mode="nearest", )[0] if num_labels is not None: channels, height, width = mask.shape dist = mask.permute(1, 2, 0).view(height, width, 1, channels) dist = dist - palette_tensor dist = torch.pow(dist, 2) dist = torch.sum(dist, dim=-1) pred = dist.argmin(dim=-1) else: # If no palette is specified SegGpt will try to paint using the mask class idx as RGB pred = mask.mean(dim=0).int() semantic_segmentation.append(pred) return semantic_segmentation __all__ = ["SegGptImageProcessor"]
transformers/src/transformers/models/seggpt/image_processing_seggpt.py/0
{ "file_path": "transformers/src/transformers/models/seggpt/image_processing_seggpt.py", "repo_id": "transformers", "token_count": 13551 }
533
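A minimal usage sketch for the SegGptImageProcessor defined above; the random arrays and the `num_labels` value are placeholders for illustration. Passing a single-channel prompt mask together with `num_labels` lets the processor build a palette to map class indices to RGB, and the same `num_labels` should be reused later in `post_process_semantic_segmentation`.

import numpy as np

from transformers import SegGptImageProcessor

image_processor = SegGptImageProcessor()

# Placeholder inputs: an input image, a prompt image and a single-channel prompt mask.
image = np.random.randint(0, 256, (448, 448, 3), dtype=np.uint8)
prompt_image = np.random.randint(0, 256, (448, 448, 3), dtype=np.uint8)
prompt_mask = np.random.randint(0, 10, (448, 448), dtype=np.uint8)  # class indices 0..9, 0 = background

inputs = image_processor(
    images=image,
    prompt_images=prompt_image,
    prompt_masks=prompt_mask,
    num_labels=9,  # classes excluding the background, used to build the RGB palette
    return_tensors="pt",
)
print(inputs["pixel_values"].shape, inputs["prompt_pixel_values"].shape, inputs["prompt_masks"].shape)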
# coding=utf-8 # Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections.abc import Mapping, Sequence from typing import Optional from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import Unpack from ...utils import logging from ..gemma3.processing_gemma3 import Gemma3Processor, Gemma3ProcessorKwargs logger = logging.get_logger(__name__) DEFAULT_SHIELDGEMMA2_POLICIES: Mapping[str, str] = { "dangerous": ( "No Dangerous Content: The image shall not contain content that" " facilitates or encourages activities that could cause real-world harm" " (e.g., building firearms and explosive devices, promotion of" " terrorism, instructions for suicide)." ), "sexual": ( "No Sexually Explicit content: The image shall not contain content that" " depicts explicit or graphic sexual acts (e.g., pornography, erotic" " nudity, depictions of rape or sexual assault)." ), "violence": ( "No Violence/Gore content: The image shall not contain content that" " depicts shocking, sensational, or gratuitous violence (e.g.," " excessive blood and gore, gratuitous violence against animals," " extreme injury or moment of death)." ), } class ShieldGemma2ProcessorKwargs(Gemma3ProcessorKwargs, total=False): policies: Optional[Sequence[str]] custom_policies: Optional[Mapping[str, str]] _defaults = { "text_kwargs": { "padding": True, }, "images_kwargs": { "do_pan_and_scan": False, }, } class ShieldGemma2Processor(Gemma3Processor): def __init__( self, image_processor, tokenizer, chat_template=None, image_seq_length=256, policy_definitions=None, **kwargs ): """A processor for the ShieldGemma 2 model. Args: image_processor: The image processor to use, typically a `Gemma3ImageProcessorFast` instance. tokenizer: The tokenizer to use, typically a `GemmaTokenizerFast` instance. chat_template: The chat template to use with this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes this value already. image_seq_length: The number of soft tokens per image. Typically, this is unset as the processor configuration on Hugging Face Hub includes this value already. policy_definitions: A mapping from policy name to its description in text used as the default policies to classify images against. The policy descriptions are included in the text of the prompts generated by this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes the base policies ShieldGemma was trained on. """ super().__init__(image_processor, tokenizer, chat_template, image_seq_length, **kwargs) if policy_definitions is None: self.policy_definitions = DEFAULT_SHIELDGEMMA2_POLICIES else: self.policy_definitions = policy_definitions def __call__( self, images: ImageInput = None, text=None, videos=None, audio=None, **kwargs: Unpack[ShieldGemma2ProcessorKwargs], ) -> BatchFeature: """Generates a batch of inputs from the provided images. 
ShieldGemma was trained to classify image content for policy compliance using a specific prompt construction. This processor generates a batch of such prompts from the provided images by: 1. Creating a list of conversations, one for each `<image, policy>` pair; 2. Converting these conversations to text using `self.apply_chat_template()`; and 3. Encoding the conversations and images using the same techniques as `Gemma3Processor`. Args: images: A single image or a list of images to include in the batch. text: Not supported. videos: Not supported. audio: Not supported. kwargs: An optional dictionary of keyword arguments to configure the processor. Possible values include: * `custom_policies`: Additional policy definitions that augment the `self.policy_definitions` passed into the constructor. Note that `custom_policies` that share a key with `self.policy_definitions` will override the policy description * `policies`: (Optional) a list of keys in the joint `self.policy_definitions | custom_policies` dictionary of specific interest for the provided images. If empty or None, prompts will be generated for every key in the joint dictionary. Returns: A `BatchFeature` containing `input_ids`, `pixel_values`, etc. where each Tensor is of shape `(len(images) * len(policies), )`, and the order within the batch will be img1_policy1, ... img1_policyN, ... imgM_policyN. """ del text, videos, audio if not images: raise ValueError("ShieldGemma 2 needs images to classify") elif not isinstance(images, Sequence): images = [images] if not self.chat_template: raise ValueError("ShieldGemma 2 requires the use of a specific chat template") # Disable pan and scan images_kwargs = kwargs.setdefault("images_kwargs", {}) if images_kwargs.get("do_pan_and_scan") is True: logger.warning_once("ShieldGemma2 does not support pan and scan.") images_kwargs["do_pan_and_scan"] = False # Enable padding on the batch during tokenization text_kwargs = kwargs.setdefault("text_kwargs", {}) if "padding" not in text_kwargs: text_kwargs["padding"] = kwargs.pop("padding", True) text_kwargs["padding_side"] = kwargs.pop("padding_side", "left") policy_definitions: Mapping[str, str] = { **self.policy_definitions, **kwargs.get("custom_policies", {}), } if (policies := kwargs.get("policies")) is None: policies = list(policy_definitions.keys()) # TODO(ryanmullins): Support images from PIL or URLs. messages = [] expanded_images = [] for img in images: for policy in policies: messages.append( [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": policy_definitions[policy]}, ], } ] ) expanded_images.append([img]) text = self.apply_chat_template(messages, tokenize=False) return super().__call__(images=expanded_images, text=text, **kwargs) __all__ = ["ShieldGemma2Processor"]
transformers/src/transformers/models/shieldgemma2/processing_shieldgemma2.py/0
{ "file_path": "transformers/src/transformers/models/shieldgemma2/processing_shieldgemma2.py", "repo_id": "transformers", "token_count": 3021 }
534
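A minimal usage sketch for the ShieldGemma2Processor defined above; the checkpoint id is an assumption for illustration, and any ShieldGemma 2 checkpoint that ships the expected chat template would work. One prompt is generated per `<image, policy>` pair, so the batch below has `len(policies)` rows for a single image.

from PIL import Image

from transformers import ShieldGemma2Processor

# Assumed checkpoint id; replace with the ShieldGemma 2 checkpoint you actually use.
processor = ShieldGemma2Processor.from_pretrained("google/shieldgemma-2-4b-it")

image = Image.new("RGB", (896, 896))  # placeholder image

inputs = processor(
    images=[image],
    policies=["dangerous", "violence"],  # subset of the default policy definitions above
    return_tensors="pt",
)
print(inputs["input_ids"].shape)  # (num_images * num_policies, sequence_length)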
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Image/Text processor class for SigLIP2. """ from typing import Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack from ...tokenization_utils_base import PreTokenizedInput, TextInput class Siglip2ImagesKwargs(ImagesKwargs, total=False): max_num_patches: Optional[int] patch_size: Optional[int] class Siglip2ProcessorKwargs(ProcessingKwargs, total=False): images_kwargs: Siglip2ImagesKwargs _defaults = { "text_kwargs": { "padding": "max_length", "truncation": True, "max_length": 64, }, "images_kwargs": { "max_num_patches": 256, "patch_size": 16, }, } class Siglip2Processor(ProcessorMixin): r""" Constructs a Siglip2 processor which wraps a Siglip2 image processor and a Gemma tokenizer into a single processor. [`Siglip2Processor`] offers all the functionalities of [`Siglip2ImageProcessor`] and [`GemmaTokenizerFast`]. See the [`~Siglip2Processor.__call__`] and [`~Siglip2Processor.decode`] for more information. Args: image_processor ([`Siglip2ImageProcessor`]): The image processor is a required input. tokenizer ([`GemmaTokenizerFast`]): The tokenizer is a required input. """ attributes = ["image_processor", "tokenizer"] image_processor_class = "AutoImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer): super().__init__(image_processor, tokenizer) def __call__( self, images: Optional[Union[ImageInput, list[ImageInput], list[list[ImageInput]]]] = None, text: Optional[Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]]] = None, audio=None, videos=None, **kwargs: Unpack[Siglip2ProcessorKwargs], ) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to GemmaTokenizerFast's [`~GemmaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` argument to Siglip2ImageProcessor's [`~Siglip2ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `max_length`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*, defaults to 64): Maximum length of the returned list and optionally padding length (see above). truncation (`bool`, *optional*, defaults to `True`): Activates truncation to cut input sequences longer than `max_length` to `max_length`. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'pt'`): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **pixel_attention_mask** -- Attention mask for the pixel values. Returned when `images` is not `None`. - **spatial_shapes** -- The number of horizontal and vertical patches per image. Returned when `images` is not `None`. """ output_kwargs = self._merge_kwargs( Siglip2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, **output_kwargs["text_kwargs"]) if images is not None: image_features = self.image_processor(images, **output_kwargs["images_kwargs"]) if text is not None and images is not None: encoding.update(image_features) return encoding elif text is not None: return encoding else: return_tensors = output_kwargs["common_kwargs"]["return_tensors"] return BatchFeature(data=dict(**image_features), tensor_type=return_tensors) __all__ = ["Siglip2Processor"]
transformers/src/transformers/models/siglip2/processing_siglip2.py/0
{ "file_path": "transformers/src/transformers/models/siglip2/processing_siglip2.py", "repo_id": "transformers", "token_count": 2827 }
535
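A minimal usage sketch for the Siglip2Processor defined above; the checkpoint id is an assumption for illustration (a NaFlex-style SigLIP2 checkpoint is expected, since this processor emits `pixel_attention_mask` and `spatial_shapes`). Text is padded and truncated to 64 tokens by default, and `max_num_patches` controls the variable-resolution patching.

from PIL import Image

from transformers import Siglip2Processor

# Assumed checkpoint id; any SigLIP2 NaFlex checkpoint using this processor class should behave the same.
processor = Siglip2Processor.from_pretrained("google/siglip2-base-patch16-naflex")

image = Image.new("RGB", (512, 384))  # placeholder image
texts = ["a photo of a cat", "a photo of a dog"]

inputs = processor(
    images=image,
    text=texts,
    max_num_patches=256,  # default shown explicitly; routed to the image processor kwargs
    return_tensors="pt",
)
print(inputs["input_ids"].shape, inputs["pixel_values"].shape, inputs["spatial_shapes"])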
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2 checkpoint.""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( Speech2Text2Config, Speech2Text2ForCausalLM, Speech2Text2Tokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Model, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights_wav2vec2(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight proj_weight = None for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True elif name.split(".")[0] == "proj": proj_weight = fairseq_model.proj is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") return proj_weight def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer def create_vocab_dict(dict_path): with open(dict_path, "r", encoding="utf-8") as f: lines = f.readlines() words = [line.split(" ")[0] for line in lines] num_words = len(words) vocab_dict = { "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, } vocab_dict.update(dict(zip(words, range(4, num_words + 4)))) return vocab_dict @torch.no_grad() def convert_wav2vec2_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ): """ Copy/paste/tweak model's weights to transformers design. """ encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path) decoder_config = Speech2Text2Config.from_pretrained( decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True ) feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, ) model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} ) model = model[0].eval() # set weights for wav2vec2 encoder hf_encoder = Wav2Vec2Model(encoder_config) projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder) hf_decoder = Speech2Text2ForCausalLM(decoder_config) missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False) # set output linear layer unexpected_keys.remove("embed_out") hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach()) # layer norm is init to identity matrix so leaving it is fine logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder) hf_wav2vec.config.tie_word_embeddings = False # add projection layer hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight) hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias) vocab_dict = create_vocab_dict(dict_path) with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp: json.dump(vocab_dict, fp) tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json")) tokenizer.save_pretrained(pytorch_dump_folder_path) config = hf_wav2vec.config.to_dict() config["pad_token_id"] = tokenizer.pad_token_id config["bos_token_id"] = tokenizer.bos_token_id config["eos_token_id"] = tokenizer.eos_token_id config["tokenizer_class"] = "speech_to_text_2" config["feature_extractor_type"] = "wav2vec2" hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) feature_extractor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") 
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-large-lv60", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/s2t-small-mustc-en-fr-st", type=str, help="Path to hf decoder s2t checkpoint config", ) parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder") parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers") args = parser.parse_args() convert_wav2vec2_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
transformers/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py", "repo_id": "transformers", "token_count": 5271 }
536
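A minimal usage sketch (not part of the conversion script above): after the script has written a folder via `--pytorch_dump_folder_path`, that folder contains a SpeechEncoderDecoderModel together with its Wav2Vec2 feature extractor and Speech2Text2 tokenizer, which the script saves separately. The folder name and the silent dummy waveform below are assumptions for illustration.

import numpy as np
import torch

from transformers import SpeechEncoderDecoderModel, Speech2Text2Tokenizer, Wav2Vec2FeatureExtractor

# Hypothetical local folder produced by --pytorch_dump_folder_path
dump_folder = "./wav2vec2_speech2text2_converted"
model = SpeechEncoderDecoderModel.from_pretrained(dump_folder)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_folder)
tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_folder)

waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence at the 16 kHz rate set by the script
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(inputs.input_values, max_length=50)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))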
# coding=utf-8 # Copyright 2023 The Fairseq Authors, Microsoft Research, and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch SpeechT5 model.""" import math from typing import Optional, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, L1Loss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...integrations.deepspeed import is_deepspeed_zero3_enabled from ...integrations.fsdp import is_fsdp_managed_module from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqSpectrogramOutput, ) from ...modeling_utils import EmbeddingAccessMixin, PreTrainedModel from ...utils import auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_speecht5 import SpeechT5Config, SpeechT5HifiGanConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 1 # General docstring # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def shift_spectrograms_right( input_values: torch.Tensor, reduction_factor: int = 1, attention_mask: Optional[torch.Tensor] = None ): """ Shift input spectrograms one timestep to the right. Also applies the reduction factor to the sequence length. """ # thin out frames for reduction factor if reduction_factor > 1: input_values = input_values[:, reduction_factor - 1 :: reduction_factor] if attention_mask is not None: attention_mask = attention_mask[:, reduction_factor - 1 :: reduction_factor] shifted_input_values = input_values.new_zeros(input_values.shape) shifted_input_values[:, 1:] = input_values[:, :-1].clone() # replace possible -100 values in labels by zeros shifted_input_values.masked_fill_(shifted_input_values == -100.0, 0.0) return shifted_input_values, attention_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. 
Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.detach().sum(-1).tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->SpeechT5 class SpeechT5NoLayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->SpeechT5 class SpeechT5LayerNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->SpeechT5 class SpeechT5GroupNormConvLayer(GradientCheckpointingLayer): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], 
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


# Copied from transformers.models.speech_to_text.modeling_speech_to_text.Speech2TextSinusoidalPositionalEmbedding with Speech2Text->SpeechT5
class SpeechT5SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
        super().__init__()
        self.offset = 2
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)

    def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.register_buffer("weights", emb_weights, persistent=False)

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0

        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len = input_ids.size()
        # Create the position ids from the input token ids. Any padded tokens remain padded.
        position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
            input_ids.device
        )

        # expand embeddings if needed
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weights.size(0):
            self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)

        return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()

    def create_position_ids_from_input_ids(
        self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
    ):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.

        Args:
            input_ids: torch.Tensor
            padding_idx: int
            past_key_values_length: int, *optional*

        Returns:
            torch.Tensor
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
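        # Each non-padding token receives `padding_idx + (its 1-based index among the non-padding tokens)`, while
        # padding tokens keep `padding_idx`. Illustrative example (made-up values): with `padding_idx=1` and
        # `input_ids = [[1, 1, 7, 9]]`, the returned position ids are `[[1, 1, 2, 3]]`.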
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->SpeechT5 class SpeechT5PositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = weight_norm(self.conv, name="weight", dim=2) if hasattr(self.conv, "parametrizations"): weight_g = self.conv.parametrizations.weight.original0 weight_v = self.conv.parametrizations.weight.original1 else: weight_g = self.conv.weight_g weight_v = self.conv.weight_v deepspeed.zero.register_external_parameter(self, weight_v) deepspeed.zero.register_external_parameter(self, weight_g) else: self.conv = weight_norm(self.conv, name="weight", dim=2) self.padding = SpeechT5SamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states class SpeechT5ScaledPositionalEncoding(nn.Module): """ Scaled positional encoding, see §3.2 in https://huggingface.co/papers/1809.08895 """ def __init__(self, dropout, dim, max_len=5000): pe = torch.zeros(max_len, dim) position = torch.arange(0, max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, dim, 2, dtype=torch.int64).float() * -(math.log(10000.0) / dim)) pe[:, 0::2] = torch.sin(position.float() * div_term) pe[:, 1::2] = torch.cos(position.float() * div_term) pe = pe.unsqueeze(0) super().__init__() self.register_buffer("pe", pe, persistent=False) self.dropout = nn.Dropout(p=dropout) self.dim = dim self.alpha = nn.Parameter(torch.tensor(1.0)) def forward(self, emb): emb = emb + self.alpha * self.pe[:, : emb.size(1)] emb = self.dropout(emb) return emb class SpeechT5RelativePositionalEncoding(torch.nn.Module): def __init__(self, dim, max_length=1000): super().__init__() self.dim = dim self.max_length = max_length self.pe_k = torch.nn.Embedding(2 * max_length, dim) def forward(self, hidden_states): seq_len = hidden_states.shape[1] pos_seq = torch.arange(0, seq_len).to(device=hidden_states.device, dtype=torch.long) pos_seq = pos_seq[:, None] - pos_seq[None, :] pos_seq[pos_seq < -self.max_length] = -self.max_length pos_seq[pos_seq >= self.max_length] = self.max_length - 1 pos_seq = pos_seq + self.max_length return self.pe_k(pos_seq) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->SpeechT5 class SpeechT5SamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from 
transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->SpeechT5 class SpeechT5FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [SpeechT5GroupNormConvLayer(config, layer_id=0)] + [ SpeechT5NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [ SpeechT5LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers) ] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: hidden_states = conv_layer(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->SpeechT5 class SpeechT5FeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class SpeechT5SpeechEncoderPrenet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.feature_encoder = SpeechT5FeatureEncoder(config) self.feature_projection = SpeechT5FeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.pos_conv_embed = SpeechT5PositionalConvEmbedding(config) self.pos_sinusoidal_embed = SpeechT5SinusoidalPositionalEmbedding( config.max_speech_positions + config.pad_token_id + 1, config.hidden_size, config.pad_token_id, ) def freeze_feature_encoder(self): self.feature_encoder._freeze_parameters() def forward( self, input_values: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, ): extract_features = self.feature_encoder(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, ) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) positional_conv_embedding = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + positional_conv_embedding if attention_mask is not None: padding_mask = attention_mask.ne(1).long() else: 
padding_mask = torch.zeros(hidden_states.shape[:2], dtype=torch.long, device=hidden_states.device) positional_sinusoidal_embeddings = self.pos_sinusoidal_embed(padding_mask) hidden_states = hidden_states + positional_sinusoidal_embeddings return hidden_states, attention_mask # Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feature_vector_attention_mask def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask # Copied from transformers.models.unispeech.modeling_unispeech.UniSpeechPreTrainedModel._get_feat_extract_output_lengths def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the convolutional layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) return input_lengths # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). 
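        When `mask_time_indices` is not provided, time-axis masks are sampled during training from
        `config.mask_time_prob` and `config.mask_time_length`; feature-axis masking is controlled analogously by
        `config.mask_feature_prob` and `config.mask_feature_length`, and setting `config.apply_spec_augment=False`
        disables masking entirely.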
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states class SpeechT5SpeechDecoderPrenet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layers = nn.ModuleList( [ nn.Linear( config.num_mel_bins if i == 0 else config.speech_decoder_prenet_units, config.speech_decoder_prenet_units, ) for i in range(config.speech_decoder_prenet_layers) ] ) self.final_layer = nn.Linear(config.speech_decoder_prenet_units, config.hidden_size) self.encode_positions = SpeechT5ScaledPositionalEncoding( config.positional_dropout, config.hidden_size, config.max_speech_positions, ) self.speaker_embeds_layer = nn.Linear(config.speaker_embedding_dim + config.hidden_size, config.hidden_size) def _consistent_dropout(self, inputs_embeds, p): mask = torch.bernoulli(inputs_embeds[0], p=p) all_masks = mask.unsqueeze(0).repeat(inputs_embeds.size(0), 1, 1) return torch.where(all_masks == 1, inputs_embeds, 0) * 1 / (1 - p) def forward( self, input_values: torch.Tensor, speaker_embeddings: Optional[torch.Tensor] = None, ): # Dropout is always applied, even when evaluating. See §2.2 in https://huggingface.co/papers/1712.05884. 
inputs_embeds = input_values for layer in self.layers: inputs_embeds = nn.functional.relu(layer(inputs_embeds)) inputs_embeds = self._consistent_dropout(inputs_embeds, self.config.speech_decoder_prenet_dropout) inputs_embeds = self.final_layer(inputs_embeds) inputs_embeds = self.encode_positions(inputs_embeds) if speaker_embeddings is not None: speaker_embeddings = nn.functional.normalize(speaker_embeddings) speaker_embeddings = speaker_embeddings.unsqueeze(1).expand(-1, inputs_embeds.size(1), -1) inputs_embeds = torch.cat([inputs_embeds, speaker_embeddings], dim=-1) inputs_embeds = nn.functional.relu(self.speaker_embeds_layer(inputs_embeds)) return inputs_embeds class SpeechT5BatchNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() if layer_id == 0: in_conv_dim = config.num_mel_bins else: in_conv_dim = config.speech_decoder_postnet_units if layer_id == config.speech_decoder_postnet_layers - 1: out_conv_dim = config.num_mel_bins else: out_conv_dim = config.speech_decoder_postnet_units self.conv = nn.Conv1d( in_conv_dim, out_conv_dim, kernel_size=config.speech_decoder_postnet_kernel, stride=1, padding=(config.speech_decoder_postnet_kernel - 1) // 2, bias=False, ) self.batch_norm = nn.BatchNorm1d(out_conv_dim) if layer_id < config.speech_decoder_postnet_layers - 1: self.activation = nn.Tanh() else: self.activation = None self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.batch_norm(hidden_states) if self.activation is not None: hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class SpeechT5SpeechDecoderPostnet(nn.Module): def __init__(self, config): super().__init__() self.config = config self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor) self.prob_out = nn.Linear(config.hidden_size, config.reduction_factor) self.layers = nn.ModuleList( [SpeechT5BatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)] ) def forward(self, hidden_states: torch.Tensor): outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins) outputs_after_postnet = self.postnet(outputs_before_postnet) logits = self.prob_out(hidden_states).view(hidden_states.size(0), -1) return outputs_before_postnet, outputs_after_postnet, logits def postnet(self, hidden_states: torch.Tensor): layer_output = hidden_states.transpose(1, 2) for layer in self.layers: layer_output = layer(layer_output) return hidden_states + layer_output.transpose(1, 2) class SpeechT5TextEncoderPrenet(nn.Module, EmbeddingAccessMixin): def __init__(self, config): super().__init__() self.config = config self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) self.encode_positions = SpeechT5ScaledPositionalEncoding( config.positional_dropout, config.hidden_size, config.max_text_positions, ) def forward(self, input_ids: torch.Tensor): inputs_embeds = self.embed_tokens(input_ids) inputs_embeds = self.encode_positions(inputs_embeds) return inputs_embeds class SpeechT5TextDecoderPrenet(nn.Module, EmbeddingAccessMixin): def __init__(self, config): super().__init__() self.config = config self.dropout = nn.Dropout(config.positional_dropout) self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id) 
self.embed_positions = SpeechT5SinusoidalPositionalEmbedding( config.max_text_positions + config.pad_token_id + 1, config.hidden_size, config.pad_token_id, ) def forward( self, input_ids: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, ): if input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) else: raise ValueError("You have to specify `decoder_input_ids`") past_key_values_length = 0 if past_key_values is not None: past_key_values_length = ( past_key_values[0][0].shape[-2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length() ) positions = self.embed_positions(input_ids, past_key_values_length) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale inputs_embeds += positions inputs_embeds = self.dropout(inputs_embeds) return inputs_embeds, attention_mask class SpeechT5TextDecoderPostnet(nn.Module, EmbeddingAccessMixin): def __init__(self, config): super().__init__() self.config = config self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) def forward(self, hidden_states: torch.Tensor): return self.lm_head(hidden_states) def get_output_embeddings(self): # Post-net has no token embeddings, but its lm_head must still be # tied to the decoder weights when `tie_word_embeddings=True`. return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings class SpeechT5Attention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper with relative position bias (see https://aclanthology.org/N18-2074.pdf) """ def __init__( self, embed_dim: int, num_heads: int, dropout: Optional[float] = 0.0, is_decoder: Optional[bool] = False, bias: Optional[bool] = True, layer_idx: Optional[bool] = None, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.layer_idx = layer_idx self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if is_cross_attention: past_key_values.is_updated[self.layer_idx] = True proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # relative attention bias if position_bias is not None: reshape_q = query_states.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1) rel_pos_bias = torch.matmul(reshape_q, position_bias.transpose(-2, -1)) rel_pos_bias = rel_pos_bias.transpose(0, 1).view( bsz * self.num_heads, position_bias.size(0), position_bias.size(1) 
            )
            attn_weights += rel_pos_bias

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class SpeechT5FeedForward(nn.Module): def __init__(self, config, intermediate_size): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class SpeechT5EncoderLayer(GradientCheckpointingLayer): def __init__(self, config: SpeechT5Config): super().__init__() self.attention = SpeechT5Attention( embed_dim=config.hidden_size, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, is_decoder=False, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = SpeechT5FeedForward(config, config.encoder_ffn_dim) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. position_bias (`torch.FloatTensor`): relative position embeddings of size `(seq_len, seq_len, hidden_size // encoder_attention_heads)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states, attn_weights = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class SpeechT5DecoderLayer(GradientCheckpointingLayer): def __init__(self, config: SpeechT5Config, layer_idx=None): super().__init__() self.self_attn = SpeechT5Attention( embed_dim=config.hidden_size, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.dropout = nn.Dropout(config.hidden_dropout) self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.encoder_attn = SpeechT5Attention( config.hidden_size, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.encoder_attn_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = SpeechT5FeedForward(config, config.decoder_ffn_dim) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, hidden_size)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = self.dropout(hidden_states) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class SpeechT5PreTrainedModel(PreTrainedModel): config: SpeechT5Config base_model_prefix = "speecht5" main_input_name = "input_values" supports_gradient_checkpointing = True def _init_weights(self, module: nn.Module): """Initialize the weights""" std = self.config.initializer_range if isinstance(module, SpeechT5PositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, SpeechT5ScaledPositionalEncoding): module.alpha.data.fill_(1.0) elif isinstance(module, SpeechT5FeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm1d)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, "masked_spec_embed"): nn.init.uniform_(module.masked_spec_embed) class SpeechT5Encoder(SpeechT5PreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* layers. Each layer is a [`SpeechT5EncoderLayer`]. 
""" def __init__(self, config: SpeechT5Config): super().__init__(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([SpeechT5EncoderLayer(config) for _ in range(config.encoder_layers)]) self.embed_positions = SpeechT5RelativePositionalEncoding( config.hidden_size // config.encoder_attention_heads, config.encoder_max_relative_position ) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`): Features extracted from the speech or text input by the encoder prenet. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) position_bias = self.embed_positions(hidden_states) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) skip_the_layer = False if self.training: dropout_probability = torch.rand([]) skip_the_layer = dropout_probability < self.layerdrop if not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = encoder_layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class SpeechT5EncoderWithSpeechPrenet(SpeechT5PreTrainedModel): """ Wrapper around SpeechT5Encoder that applies SpeechT5SpeechEncoderPrenet to convert the audio waveform data to hidden features. """ def __init__(self, config: SpeechT5Config): super().__init__(config) self.prenet = SpeechT5SpeechEncoderPrenet(config) self.wrapped_encoder = SpeechT5Encoder(config) # Initialize weights and apply final processing self.post_init() def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: hidden_states, attention_mask = self.prenet(input_values, attention_mask) outputs = self.wrapped_encoder( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs class SpeechT5EncoderWithTextPrenet(SpeechT5PreTrainedModel): """ Wrapper around SpeechT5Encoder that applies SpeechT5TextEncoderPrenet to convert the input_ids to hidden features. """ def __init__(self, config: SpeechT5Config): super().__init__(config) self.prenet = SpeechT5TextEncoderPrenet(config) self.wrapped_encoder = SpeechT5Encoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.prenet.get_input_embeddings() def set_input_embeddings(self, value): self.prenet.set_input_embeddings(value) def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: hidden_states = self.prenet(input_values) outputs = self.wrapped_encoder( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs class SpeechT5EncoderWithoutPrenet(SpeechT5PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with [`SpeechT5Model`]. 
""" def __init__(self, config: SpeechT5Config): super().__init__(config) self.wrapped_encoder = SpeechT5Encoder(config) # Initialize weights and apply final processing self.post_init() def forward( self, input_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: return self.wrapped_encoder( hidden_states=input_values, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class SpeechT5Decoder(SpeechT5PreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`SpeechT5DecoderLayer`] """ def __init__(self, config: SpeechT5Config): super().__init__(config) self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([SpeechT5DecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`): Features extracted from the speech or text input by the decoder prenet. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = hidden_states.size()[:-1] if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache()) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. " "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." 
) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, hidden_states, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, hidden_states.dtype, tgt_len=input_shape[-1] ) synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) skip_the_layer = False if self.training: dropout_probability = torch.rand([]) skip_the_layer = dropout_probability < self.layerdrop if skip_the_layer and not synced_gpus: continue layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class SpeechT5DecoderWithSpeechPrenet(SpeechT5PreTrainedModel): """ Wrapper around SpeechT5Decoder that applies SpeechT5SpeechDecoderPrenet to convert log-mel filterbanks to hidden features. 
""" def __init__(self, config: SpeechT5Config): super().__init__(config) self.prenet = SpeechT5SpeechDecoderPrenet(config) self.wrapped_decoder = SpeechT5Decoder(config) # Initialize weights and apply final processing self.post_init() def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, speaker_embeddings: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: decoder_hidden_states = self.prenet(input_values, speaker_embeddings) outputs = self.wrapped_decoder( hidden_states=decoder_hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs class SpeechT5DecoderWithTextPrenet(SpeechT5PreTrainedModel): """ Wrapper around SpeechT5Decoder that applies SpeechT5TextDecoderPrenet to convert input tokens to hidden features. """ def __init__(self, config: SpeechT5Config): super().__init__(config) self.prenet = SpeechT5TextDecoderPrenet(config) self.wrapped_decoder = SpeechT5Decoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.prenet.get_input_embeddings() def set_input_embeddings(self, value): self.prenet.set_input_embeddings(value) def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: decoder_hidden_states, attention_mask = self.prenet(input_values, attention_mask, past_key_values) outputs = self.wrapped_decoder( hidden_states=decoder_hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs class SpeechT5DecoderWithoutPrenet(SpeechT5PreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when used in combination with [`SpeechT5Model`]. 
""" def __init__(self, config: SpeechT5Config): super().__init__(config) self.wrapped_decoder = SpeechT5Decoder(config) # Initialize weights and apply final processing self.post_init() def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[list[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: outputs = self.wrapped_decoder( hidden_states=input_values, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) return outputs class SpeechT5GuidedMultiheadAttentionLoss(nn.Module): """ Guided attention loss from the paper [Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention](https://huggingface.co/papers/1710.08969), adapted for multi-head attention. """ def __init__(self, config: SpeechT5Config): super().__init__() self.sigma = config.guided_attention_loss_sigma self.scale = config.guided_attention_loss_scale def forward( self, attentions: torch.FloatTensor, input_masks: torch.BoolTensor, output_masks: torch.BoolTensor ) -> torch.Tensor: """ Compute the attention loss. Args: attentions (`torch.FloatTensor` of shape `(batch_size, layers * heads, output_sequence_length, input_sequence_length)`): Batch of multi-head attention weights input_masks (`torch.BoolTensor` of shape `(batch_size, input_sequence_length)`): Input attention mask as booleans. output_masks (`torch.BoolTensor` of shape `(batch_size, output_sequence_length)`): Target attention mask as booleans. 
Returns: `torch.Tensor` with the loss value """ guided_attn_masks = self._make_guided_attention_masks(input_masks, output_masks, attentions.device) masks = output_masks.unsqueeze(-1) & input_masks.unsqueeze(-2) masks = masks.to(attentions.device).unsqueeze(1) losses = guided_attn_masks * attentions loss = torch.mean(losses.masked_select(masks)) return self.scale * loss def _make_guided_attention_masks(self, input_masks, output_masks, device): input_lengths = input_masks.sum(-1) output_lengths = output_masks.sum(-1) guided_attn_masks = torch.zeros((len(input_masks), output_masks.shape[1], input_masks.shape[1]), device=device) for idx, (ilen, olen) in enumerate(zip(input_lengths, output_lengths)): guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(ilen, olen, self.sigma, device) return guided_attn_masks.unsqueeze(1) @staticmethod def _make_guided_attention_mask(input_length, output_length, sigma, device): grid_y, grid_x = torch.meshgrid( torch.arange(input_length, device=device), torch.arange(output_length, device=device), indexing="xy", ) grid_x = grid_x.float() / output_length grid_y = grid_y.float() / input_length return 1.0 - torch.exp(-((grid_y - grid_x) ** 2) / (2 * (sigma**2))) class SpeechT5SpectrogramLoss(nn.Module): """ Loss computation used by SpeechT5ForTextToSpeech. """ def __init__(self, config: SpeechT5Config): super().__init__() self.use_guided_attention_loss = config.use_guided_attention_loss self.guided_attention_loss_num_heads = config.guided_attention_loss_num_heads self.reduction_factor = config.reduction_factor self.l1_criterion = L1Loss() self.bce_criterion = BCEWithLogitsLoss(pos_weight=torch.tensor(5.0)) if self.use_guided_attention_loss: self.attn_criterion = SpeechT5GuidedMultiheadAttentionLoss(config) def forward( self, attention_mask: torch.LongTensor, outputs_before_postnet: torch.FloatTensor, outputs_after_postnet: torch.FloatTensor, logits: torch.FloatTensor, labels: torch.FloatTensor, cross_attentions: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: padding_mask = labels != -100.0 # mask out the padded portions labels = labels.masked_select(padding_mask) outputs_before_postnet = outputs_before_postnet.masked_select(padding_mask) outputs_after_postnet = outputs_after_postnet.masked_select(padding_mask) # spectrogram loss l1_loss = self.l1_criterion(outputs_after_postnet, labels) + self.l1_criterion(outputs_before_postnet, labels) # construct stop labels from the padding mask masks = padding_mask[:, :, 0] stop_labels = torch.cat([~masks * 1.0, torch.ones(masks.size(0), 1).to(masks.device)], dim=1) stop_labels = stop_labels[:, 1:].masked_select(masks) logits = logits.masked_select(masks) # stop token loss bce_loss = self.bce_criterion(logits, stop_labels) # combined loss loss = l1_loss + bce_loss # guided attention loss if self.use_guided_attention_loss: attn = torch.cat([x[:, : self.guided_attention_loss_num_heads] for x in cross_attentions], dim=1) input_masks = attention_mask == 1 output_masks = padding_mask[:, :, 0] if self.reduction_factor > 1: output_masks = output_masks[:, self.reduction_factor - 1 :: self.reduction_factor] attn_loss = self.attn_criterion(attn, input_masks, output_masks) loss += attn_loss return loss @auto_docstring( custom_intro=""" The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets. 
""" ) class SpeechT5Model(SpeechT5PreTrainedModel): def __init__( self, config: SpeechT5Config, encoder: Optional[nn.Module] = None, decoder: Optional[nn.Module] = None, ): r""" encoder (`PreTrainedModel`, *optional*): The encoder model to use. decoder (`PreTrainedModel`, *optional*): The decoder model to use. """ super().__init__(config) self.config = config self.encoder = SpeechT5EncoderWithoutPrenet(config) if encoder is None else encoder self.decoder = SpeechT5DecoderWithoutPrenet(config) if decoder is None else decoder # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet): return self.encoder.get_input_embeddings() if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet): return self.decoder.get_input_embeddings() raise NotImplementedError def set_input_embeddings(self, value): if isinstance(self.encoder, SpeechT5EncoderWithTextPrenet): self.encoder.set_input_embeddings(value) if isinstance(self.decoder, SpeechT5DecoderWithTextPrenet): self.decoder.set_input_embeddings(value) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ if isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet): self.encoder.prenet.freeze_feature_encoder() @auto_docstring def forward( self, input_values: Optional[torch.Tensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_values: Optional[torch.Tensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.FloatTensor], Seq2SeqModelOutput]: r""" input_values (`torch.Tensor` of shape `(batch_size, sequence_length)`): Depending on which encoder is being used, the `input_values` are either: float values of the input raw speech waveform, or indices of input sequence tokens in the vocabulary, or hidden states. decoder_input_values (`torch.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Depending on which decoder is being used, the `decoder_input_values` are either: float values of log-mel filterbank features extracted from the raw speech waveform, or indices of decoder input sequence tokens in the vocabulary, or hidden states. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_values=input_values, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # downsample encoder attention mask (only for encoders with speech input) if attention_mask is not None and isinstance(self.encoder, SpeechT5EncoderWithSpeechPrenet): encoder_attention_mask = self.encoder.prenet._get_feature_vector_attention_mask( encoder_outputs[0].shape[1], attention_mask ) else: encoder_attention_mask = attention_mask if isinstance(self.decoder, SpeechT5DecoderWithSpeechPrenet): decoder_args = {"speaker_embeddings": speaker_embeddings} else: decoder_args = {} decoder_outputs = self.decoder( input_values=decoder_input_values, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, **decoder_args, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" SpeechT5 Model with a speech encoder and a text decoder. """ ) class SpeechT5ForSpeechToText(SpeechT5PreTrainedModel, GenerationMixin): _tied_weights_keys = ["text_decoder_postnet.lm_head.weight"] def __init__(self, config: SpeechT5Config): super().__init__(config) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that does not define the" " vocabulary size of the language model head. 
Please instantiate the model as follows:" " `SpeechT5ForSpeechToText.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of" " your model's configuration." ) speech_encoder = SpeechT5EncoderWithSpeechPrenet(config) text_decoder = SpeechT5DecoderWithTextPrenet(config) self.speecht5 = SpeechT5Model(config, speech_encoder, text_decoder) self.text_decoder_postnet = SpeechT5TextDecoderPostnet(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.speecht5.get_encoder() def get_decoder(self): return self.speecht5.get_decoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.get_encoder().prenet.freeze_feature_encoder() def get_output_embeddings(self): return self.text_decoder_postnet.get_output_embeddings() def set_output_embeddings(self, new_embeddings): self.text_decoder_postnet.set_output_embeddings(new_embeddings) @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqLMOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) SpeechT5 uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Label indices can be obtained using [`SpeechT5Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. Example: ```python >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToText >>> from datasets import load_dataset >>> dataset = load_dataset( ... "hf-internal-testing/librispeech_asr_demo", "clean", split="validation" ... ) # doctest: +IGNORE_RESULT >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr") >>> model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr") >>> # audio file is decoded on the fly >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> predicted_ids = model.generate(**inputs, max_length=100) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) >>> transcription[0] 'mister quilter is the apostle of the middle classes and we are glad to welcome his gospel' ``` ```python >>> inputs["labels"] = processor(text_target=dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) 19.68 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.speecht5( input_values=input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, ) logits = self.text_decoder_postnet(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def _generate_speech( model: SpeechT5PreTrainedModel, input_values: torch.FloatTensor, speaker_embeddings: Optional[torch.FloatTensor] = None, attention_mask: 
Optional[torch.LongTensor] = None, threshold: float = 0.5, minlenratio: float = 0.0, maxlenratio: float = 20.0, vocoder: Optional[nn.Module] = None, output_cross_attentions: bool = False, return_output_lengths: bool = False, ) -> Union[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor]]: if speaker_embeddings is None: raise ValueError( """`speaker_embeddings` must be specified. For example, you can use a speaker embeddings by following the code snippet provided in this link: https://huggingface.co/datasets/Matthijs/cmu-arctic-xvectors """ ) if attention_mask is None: encoder_attention_mask = 1 - (input_values == model.config.pad_token_id).int() else: encoder_attention_mask = attention_mask bsz = input_values.size(0) encoder_out = model.speecht5.encoder( input_values=input_values, attention_mask=encoder_attention_mask, return_dict=True, ) encoder_last_hidden_state = encoder_out.last_hidden_state # downsample encoder attention mask if isinstance(model.speecht5.encoder, SpeechT5EncoderWithSpeechPrenet): encoder_attention_mask = model.speecht5.encoder.prenet._get_feature_vector_attention_mask( encoder_out[0].shape[1], encoder_attention_mask ) maxlen = int(encoder_last_hidden_state.size(1) * maxlenratio / model.config.reduction_factor) minlen = int(encoder_last_hidden_state.size(1) * minlenratio / model.config.reduction_factor) # Start the output sequence with a mel spectrum that is all zeros. output_sequence = encoder_last_hidden_state.new_zeros(bsz, 1, model.config.num_mel_bins) spectrogram = [] cross_attentions = [] past_key_values = None idx = 0 result_spectrogram = {} while True: idx += 1 # Run the decoder prenet on the entire output sequence. decoder_hidden_states = model.speecht5.decoder.prenet(output_sequence, speaker_embeddings) # Run the decoder layers on the last element of the prenet output. decoder_out = model.speecht5.decoder.wrapped_decoder( hidden_states=decoder_hidden_states[:, -1:], attention_mask=None, encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=True, output_attentions=output_cross_attentions, return_dict=True, ) if output_cross_attentions: cross_attentions.append(torch.cat(decoder_out.cross_attentions, dim=0)) last_decoder_output = decoder_out.last_hidden_state.squeeze(1) past_key_values = decoder_out.past_key_values # Predict the new mel spectrum for this step in the sequence. spectrum = model.speech_decoder_postnet.feat_out(last_decoder_output) spectrum = spectrum.view(bsz, model.config.reduction_factor, model.config.num_mel_bins) spectrogram.append(spectrum) # Extend the output sequence with the new mel spectrum. new_spectrogram = spectrum[:, -1, :].view(bsz, 1, model.config.num_mel_bins) output_sequence = torch.cat((output_sequence, new_spectrogram), dim=1) # Predict the probability that this is the stop token. prob = torch.sigmoid(model.speech_decoder_postnet.prob_out(last_decoder_output)) if idx < minlen: continue else: # If the generation loop is less than maximum length time, check the ones in the batch that have met # the prob threshold. Otherwise, assume all have met thresholds and fill other spectrograms for the batch. 
if idx < maxlen: meet_thresholds = torch.sum(prob, dim=-1) >= threshold meet_indexes = torch.where(meet_thresholds)[0].tolist() else: meet_indexes = range(len(prob)) meet_indexes = [i for i in meet_indexes if i not in result_spectrogram] if len(meet_indexes) > 0: spectrograms = torch.stack(spectrogram) spectrograms = spectrograms.transpose(0, 1).flatten(1, 2) spectrograms = model.speech_decoder_postnet.postnet(spectrograms) for meet_index in meet_indexes: result_spectrogram[meet_index] = spectrograms[meet_index] if len(result_spectrogram) >= bsz: break spectrograms = [result_spectrogram[i] for i in range(len(result_spectrogram))] if not return_output_lengths: spectrogram = spectrograms[0] if bsz == 1 else torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True) if vocoder is not None: outputs = vocoder(spectrogram) else: outputs = spectrogram if output_cross_attentions: cross_attentions = torch.cat(cross_attentions, dim=2) if bsz > 1: cross_attentions = cross_attentions.view( bsz, int(cross_attentions.size(0) / bsz), *cross_attentions.size()[-3:] ) outputs = (outputs, cross_attentions) else: # batched return values should also include the spectrogram/waveform lengths spectrogram_lengths = [] for i in range(bsz): spectrogram_lengths.append(spectrograms[i].size(0)) if vocoder is None: spectrograms = torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True) outputs = (spectrograms, spectrogram_lengths) else: waveforms = [] spectrograms = torch.nn.utils.rnn.pad_sequence(spectrograms, batch_first=True) waveforms = vocoder(spectrograms) waveform_lengths = [int(waveforms.size(1) / max(spectrogram_lengths)) * i for i in spectrogram_lengths] outputs = (waveforms, waveform_lengths) if output_cross_attentions: cross_attentions = torch.cat(cross_attentions, dim=2) cross_attentions = cross_attentions.view( bsz, int(cross_attentions.size(0) / bsz), *cross_attentions.size()[-3:] ) outputs = (*outputs, cross_attentions) return outputs @auto_docstring( custom_intro=""" SpeechT5 Model with a text encoder and a speech decoder. """ ) class SpeechT5ForTextToSpeech(SpeechT5PreTrainedModel): main_input_name = "input_ids" def __init__(self, config: SpeechT5Config): super().__init__(config) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that does not define the" " vocabulary size of the language model head. Please instantiate the model as follows:" " `SpeechT5ForTextToSpeech.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of" " your model's configuration." ) text_encoder = SpeechT5EncoderWithTextPrenet(config) speech_decoder = SpeechT5DecoderWithSpeechPrenet(config) self.speecht5 = SpeechT5Model(config, text_encoder, speech_decoder) self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config) # Initialize weights and apply final processing self.post_init() @classmethod def can_generate(cls) -> bool: # Speecht5 has a unique model structure, where the external class (`SpeechT5ForTextToSpeech`) doesn't need to inherit from # `GenerationMixin` (it has a non-standard generation method). This means that the base `can_generate()` will return `False`, # but we need to override it so as to do `GenerationConfig` handling in multiple parts of the codebase. 
return True def get_encoder(self): return self.speecht5.get_encoder() def get_decoder(self): return self.speecht5.get_decoder() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_values: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, stop_labels: Optional[torch.Tensor] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqSpectrogramOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and [`~PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`): Float values of input mel spectrogram. SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*): Float values of target mel spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss computation. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`] for details. stop_labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Binary tensor indicating the position of the stop token in the sequence. 
Example: ```python >>> from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, set_seed >>> import torch >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") >>> model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt") >>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file >>> set_seed(555) # make deterministic >>> # generate speech >>> speech = model.generate(inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder) >>> speech.shape torch.Size([15872]) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_values is None: decoder_input_values, decoder_attention_mask = shift_spectrograms_right( labels, self.config.reduction_factor, decoder_attention_mask ) if self.config.use_guided_attention_loss: output_attentions = True outputs = self.speecht5( input_values=input_ids, attention_mask=attention_mask, decoder_input_values=decoder_input_values, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, use_cache=use_cache, speaker_embeddings=speaker_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, ) outputs_before_postnet, outputs_after_postnet, logits = self.speech_decoder_postnet(outputs[0]) loss = None if labels is not None: criterion = SpeechT5SpectrogramLoss(self.config) loss = criterion( attention_mask, outputs_before_postnet, outputs_after_postnet, logits, labels, outputs.cross_attentions, ) if not return_dict: output = (outputs_after_postnet,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSpectrogramOutput( loss=loss, spectrogram=outputs_after_postnet, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @torch.no_grad() def generate( self, input_ids: torch.LongTensor, attention_mask: Optional[torch.LongTensor] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, threshold: float = 0.5, minlenratio: float = 0.0, maxlenratio: float = 20.0, vocoder: Optional[nn.Module] = None, output_cross_attentions: bool = False, return_output_lengths: bool = False, **kwargs, ) -> Union[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor]]: r""" Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and [`~PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Attention mask from the tokenizer, required for batched inference to signal to the model where to ignore padded tokens from the input_ids. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. threshold (`float`, *optional*, defaults to 0.5): The generated sequence ends when the predicted stop token probability exceeds this value. minlenratio (`float`, *optional*, defaults to 0.0): Used to calculate the minimum required length for the output sequence. maxlenratio (`float`, *optional*, defaults to 20.0): Used to calculate the maximum allowed length for the output sequence. vocoder (`nn.Module`, *optional*): The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel spectrogram. output_cross_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of the decoder's cross-attention layers. return_output_lengths (`bool`, *optional*, defaults to `False`): Whether or not to return the concrete spectrogram/waveform lengths. Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the inputs: - when `return_output_lengths` is False - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram. - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(num_frames,)` -- The predicted speech waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. - when `return_output_lengths` is True - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that are padded to the maximum length. - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each spectrogram. - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length. - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. """ if speaker_embeddings is not None: batch_size = input_ids.size(0) if speaker_embeddings.size(0) != batch_size: if speaker_embeddings.size(0) == 1: speaker_embeddings = speaker_embeddings.repeat(batch_size, 1) else: raise ValueError( "The first dimension of speaker_embeddings must be either 1 or the same as batch_size." 
) return _generate_speech( self, input_ids, speaker_embeddings, attention_mask, threshold, minlenratio, maxlenratio, vocoder, output_cross_attentions, return_output_lengths, ) @torch.no_grad() def generate_speech( self, input_ids: torch.LongTensor, speaker_embeddings: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, threshold: float = 0.5, minlenratio: float = 0.0, maxlenratio: float = 20.0, vocoder: Optional[nn.Module] = None, output_cross_attentions: bool = False, return_output_lengths: bool = False, ) -> Union[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor]]: r""" Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and [`~PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) threshold (`float`, *optional*, defaults to 0.5): The generated sequence ends when the predicted stop token probability exceeds this value. minlenratio (`float`, *optional*, defaults to 0.0): Used to calculate the minimum required length for the output sequence. maxlenratio (`float`, *optional*, defaults to 20.0): Used to calculate the maximum allowed length for the output sequence. vocoder (`nn.Module`, *optional*, defaults to `None`): The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel spectrogram. output_cross_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of the decoder's cross-attention layers. return_output_lengths (`bool`, *optional*, defaults to `False`): Whether or not to return the concrete spectrogram/waveform lengths. Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the inputs: - when `return_output_lengths` is False - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram. - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(num_frames,)` -- The predicted speech waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. - when `return_output_lengths` is True - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that are padded to the maximum length. 
- **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each spectrogram. - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length. - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. """ if speaker_embeddings is not None: batch_size = input_ids.size(0) if speaker_embeddings.size(0) != batch_size: if speaker_embeddings.size(0) == 1: speaker_embeddings = speaker_embeddings.repeat(batch_size, 1) else: raise ValueError( "The first dimension of speaker_embeddings must be either 1 or the same as batch size." ) return _generate_speech( self, input_ids, speaker_embeddings, attention_mask, threshold, minlenratio, maxlenratio, vocoder, output_cross_attentions, return_output_lengths, ) @auto_docstring( custom_intro=""" SpeechT5 Model with a speech encoder and a speech decoder. """ ) class SpeechT5ForSpeechToSpeech(SpeechT5PreTrainedModel): def __init__(self, config: SpeechT5Config): super().__init__(config) speech_encoder = SpeechT5EncoderWithSpeechPrenet(config) speech_decoder = SpeechT5DecoderWithSpeechPrenet(config) self.speecht5 = SpeechT5Model(config, speech_encoder, speech_decoder) self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.speecht5.get_encoder() def get_decoder(self): return self.speecht5.get_decoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.get_encoder().prenet.freeze_feature_encoder() @auto_docstring def forward( self, input_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_input_values: Optional[torch.FloatTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, speaker_embeddings: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, stop_labels: Optional[torch.Tensor] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqSpectrogramOutput]: r""" input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). 
To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details. decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`): Float values of input mel spectrogram. SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see `past_key_values`). decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*): Float values of target mel spectrogram. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`] for details. stop_labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Binary tensor indicating the position of the stop token in the sequence. Example: ```python >>> from transformers import SpeechT5Processor, SpeechT5ForSpeechToSpeech, SpeechT5HifiGan, set_seed >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset( ... "hf-internal-testing/librispeech_asr_demo", "clean", split="validation" ... 
) # doctest: +IGNORE_RESULT >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc") >>> model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc") >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") >>> # audio file is decoded on the fly >>> inputs = processor(audio=dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file >>> set_seed(555) # make deterministic >>> # generate speech >>> speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder) >>> speech.shape torch.Size([77824]) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_values is None: decoder_input_values, decoder_attention_mask = shift_spectrograms_right( labels, self.config.reduction_factor, decoder_attention_mask ) outputs = self.speecht5( input_values=input_values, attention_mask=attention_mask, decoder_input_values=decoder_input_values, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, use_cache=use_cache, speaker_embeddings=speaker_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, ) _, spectrogram, logits = self.speech_decoder_postnet(outputs[0]) loss = None if not return_dict: output = (spectrogram,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSpectrogramOutput( loss=loss, spectrogram=spectrogram, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @torch.no_grad() def generate_speech( self, input_values: torch.FloatTensor, speaker_embeddings: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, threshold: float = 0.5, minlenratio: float = 0.0, maxlenratio: float = 20.0, vocoder: Optional[nn.Module] = None, output_cross_attentions: bool = False, return_output_lengths: bool = False, ) -> torch.FloatTensor: r""" Converts a raw speech waveform into a sequence of mel spectrograms, which are subsequently turned back into a speech waveform using a vocoder. Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`SpeechT5Processor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`SpeechT5Processor.__call__`] for details. speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*): Tensor containing the speaker embeddings. 
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) threshold (`float`, *optional*, defaults to 0.5): The generated sequence ends when the predicted stop token probability exceeds this value. minlenratio (`float`, *optional*, defaults to 0.0): Used to calculate the minimum required length for the output sequence. maxlenratio (`float`, *optional*, defaults to 20.0): Used to calculate the maximum allowed length for the output sequence. vocoder (`nn.Module`, *optional*, defaults to `None`): The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel spectrogram. output_cross_attentions (`bool`, *optional*, defaults to `False`): Whether or not to return the attentions tensors of the decoder's cross-attention layers. return_output_lengths (`bool`, *optional*, defaults to `False`): Whether or not to return the concrete spectrogram/waveform lengths. Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the inputs: - when `return_output_lengths` is False - **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram. - **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(num_frames,)` -- The predicted speech waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. - when `return_output_lengths` is True - **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that are padded to the maximum length. - **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each spectrogram. - **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape `(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length. - **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `list[Int]` -- A list of all the concrete lengths for each waveform. - **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`) `torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers. 
""" if speaker_embeddings is None: speaker_embeddings = torch.zeros((1, 512), device=input_values.device) return _generate_speech( self, input_values, speaker_embeddings, attention_mask, threshold, minlenratio, maxlenratio, vocoder, output_cross_attentions, return_output_lengths, ) class HifiGanResidualBlock(nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1): super().__init__() self.leaky_relu_slope = leaky_relu_slope self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=dilation[i], padding=self.get_padding(kernel_size, dilation[i]), ) for i in range(len(dilation)) ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, stride=1, dilation=1, padding=self.get_padding(kernel_size, 1), ) for _ in range(len(dilation)) ] ) def get_padding(self, kernel_size, dilation=1): return (kernel_size * dilation - dilation) // 2 def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm for layer in self.convs1: weight_norm(layer) for layer in self.convs2: weight_norm(layer) def remove_weight_norm(self): for layer in self.convs1: nn.utils.remove_weight_norm(layer) for layer in self.convs2: nn.utils.remove_weight_norm(layer) def forward(self, hidden_states): for conv1, conv2 in zip(self.convs1, self.convs2): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv1(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = conv2(hidden_states) hidden_states = hidden_states + residual return hidden_states @auto_docstring( custom_intro=""" HiFi-GAN vocoder. 
""" ) class SpeechT5HifiGan(PreTrainedModel): config: SpeechT5HifiGanConfig main_input_name = "spectrogram" def __init__(self, config: SpeechT5HifiGanConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.num_upsamples = len(config.upsample_rates) self.conv_pre = nn.Conv1d( config.model_in_dim, config.upsample_initial_channel, kernel_size=7, stride=1, padding=3, ) self.upsampler = nn.ModuleList() for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)): self.upsampler.append( nn.ConvTranspose1d( config.upsample_initial_channel // (2**i), config.upsample_initial_channel // (2 ** (i + 1)), kernel_size=kernel_size, stride=upsample_rate, padding=(kernel_size - upsample_rate) // 2, ) ) self.resblocks = nn.ModuleList() for i in range(len(self.upsampler)): channels = config.upsample_initial_channel // (2 ** (i + 1)) for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes): self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope)) self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3) self.register_buffer("mean", torch.zeros(config.model_in_dim)) self.register_buffer("scale", torch.ones(config.model_in_dim)) # Initialize weights and apply final processing self.post_init() def _init_weights(self, module: nn.Module): """Initialize the weights.""" if isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv_pre) for layer in self.upsampler: weight_norm(layer) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.upsampler: nn.utils.remove_weight_norm(layer) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post) @auto_docstring( custom_intro=""" Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech waveform. """ ) def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor: r""" spectrogram (`torch.FloatTensor`): Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length, config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`. Returns: `torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`. 
""" if self.config.normalize_before: spectrogram = (spectrogram - self.mean) / self.scale is_batched = spectrogram.dim() == 3 if not is_batched: spectrogram = spectrogram.unsqueeze(0) hidden_states = spectrogram.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for i in range(self.num_upsamples): hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope) hidden_states = self.upsampler[i](hidden_states) res_state = self.resblocks[i * self.num_kernels](hidden_states) for j in range(1, self.num_kernels): res_state += self.resblocks[i * self.num_kernels + j](hidden_states) hidden_states = res_state / self.num_kernels hidden_states = nn.functional.leaky_relu(hidden_states) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) if not is_batched: # remove batch dim and collapse tensor to 1-d audio waveform waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1) else: # remove seq-len dim since this collapses to 1 waveform = hidden_states.squeeze(1) return waveform __all__ = [ "SpeechT5ForSpeechToText", "SpeechT5ForSpeechToSpeech", "SpeechT5ForTextToSpeech", "SpeechT5Model", "SpeechT5PreTrainedModel", "SpeechT5HifiGan", ]
transformers/src/transformers/models/speecht5/modeling_speecht5.py/0
{ "file_path": "transformers/src/transformers/models/speecht5/modeling_speecht5.py", "repo_id": "transformers", "token_count": 64388 }
537
# coding=utf-8 # Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch StableLM model.""" import math from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available from ...modeling_layers import ( GenericForSequenceClassification, GenericForTokenClassification, GradientCheckpointingLayer, ) from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...utils.deprecation import deprecate_kwarg from .configuration_stablelm import StableLmConfig if is_torch_flex_attn_available(): from torch.nn.attention.flex_attention import BlockMask from ...integrations.flex_attention import make_flex_block_causal_mask if is_flash_attn_available(): from ...modeling_flash_attention_utils import _flash_attention_forward logger = logging.get_logger(__name__) # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->StableLm class StableLmRotaryEmbedding(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: StableLmConfig, device=None): super().__init__() # BC: "rope_type" was originally "type" if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict): self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type")) else: self.rope_type = "default" self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = self.inv_freq @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. 
dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) # Copied from transformers.models.llama.modeling_llama.rotate_half def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
""" cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed # Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->StableLm class StableLmMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj class StableLmLayerNormPerHead(nn.Module): def __init__(self, dim, num_heads, eps=1e-5, bias=False): super().__init__() self.dim = dim self.num_heads = num_heads self.norms = nn.ModuleList([nn.LayerNorm(dim, eps=eps, bias=bias) for _ in range(self.num_heads)]) def forward(self, hidden_states: torch.Tensor): # Split along the num_heads axis to get per-head inputs # [batch_size, num_heads, seq_len, head_dim] -> [batch_size, 1, seq_len, head_dim] * num_heads states_per_heads = torch.split(hidden_states, 1, dim=1) # Normalize and merge the heads back together return torch.cat([norm(hidden_states) for norm, hidden_states in zip(self.norms, states_per_heads)], dim=1) # Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) class StableLmAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: StableLmConfig, layer_idx: Optional[int] = None): super().__init__() self.config = config self.layer_idx = layer_idx if layer_idx is None: logger.warning_once( f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " "when creating this class." ) self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = config.num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.rope_theta = config.rope_theta self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor) self.is_causal = True if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.use_qkv_bias) self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias) self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.use_qkv_bias) self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False) self.qk_layernorm = config.qk_layernorm if self.qk_layernorm: self.q_layernorm = StableLmLayerNormPerHead(self.head_dim, self.num_heads, eps=config.layer_norm_eps) self.k_layernorm = StableLmLayerNormPerHead( self.head_dim, self.num_key_value_heads, eps=config.layer_norm_eps ) self.attention_dropout = nn.Dropout(config.attention_dropout) self.rotary_emb = StableLmRotaryEmbedding(config=self.config) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.qk_layernorm: query_states = self.q_layernorm(query_states) key_states = self.k_layernorm(key_states) cos, sin = position_embeddings # Partial rotary embedding query_rot, query_pass = ( query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :], ) key_rot, key_pass = ( key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :], ) # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor] query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) # [batch_size, seq_length, num_heads, head_dim] query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if past_key_values is not None: # Specific to RoPE models with partial rotation cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_ndims, "cache_position": cache_position, } key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # Repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attention_mask is not None: # no matter the length, we just slice it causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights += causal_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype) attn_weights = self.attention_dropout(attn_weights) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise 
ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights class StableLmSdpaAttention(StableLmAttention): @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: if output_attentions: # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. logger.warning_once( "StableLmModel is using StableLmSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.qk_layernorm: query_states = self.q_layernorm(query_states) key_states = self.k_layernorm(key_states) cos, sin = position_embeddings # Partial rotary embedding query_rot, query_pass = ( query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :], ) key_rot, key_pass = ( key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :], ) # [batch_size, seq_length, num_heads, head_dim // config.partial_rotary_factor] query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) # [batch_size, seq_length, num_heads, head_dim] query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if past_key_values is not None: # Specific to RoPE models with partial rotation cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_ndims, "cache_position": cache_position, } key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # Repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) causal_mask = attention_mask if attention_mask is not None: # no matter the length, we just slice it causal_mask = 
attention_mask[:, :, :, : key_states.shape[-2]] # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, # Reference: https://github.com/pytorch/pytorch/issues/112577. if query_states.device.type == "cuda" and attention_mask is not None: query_states = query_states.contiguous() key_states = key_states.contiguous() value_states = value_states.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. is_causal = bool(causal_mask is None and q_len > 1) attn_output = torch.nn.functional.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=causal_mask, dropout_p=self.attention_dropout.p if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output, None class StableLmFlashAttention2(StableLmAttention): """ StableLM flash attention module. This module inherits from `StableLmAttention` as the weights of the module stays untouched. The only required change would be on the forward pass where it needs to correctly call the public API of flash attention and deal with padding tokens in case the input contains any of them. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask() @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC **kwargs, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: # StableLmFlashAttention2 attention does not support output_attentions output_attentions = False bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) if self.qk_layernorm: query_states = self.q_layernorm(query_states) key_states = self.k_layernorm(key_states) cos, sin = position_embeddings # Partial rotary embedding query_rot, query_pass = ( query_states[..., : self.rotary_ndims], query_states[..., self.rotary_ndims :], ) key_rot, key_pass = ( key_states[..., : self.rotary_ndims], key_states[..., self.rotary_ndims :], ) query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin) # [batch_size, seq_length, num_heads, head_dim] query_states = torch.cat((query_rot, query_pass), dim=-1) key_states = torch.cat((key_rot, key_pass), dim=-1) if past_key_values is not None: cache_kwargs = { "sin": sin, "cos": cos, "partial_rotation_size": self.rotary_ndims, "cache_position": cache_position, } key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. 
query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout.p if self.training else 0.0 attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, use_top_left_mask=self._flash_attn_uses_top_left_mask, is_causal=self.is_causal, ) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights ATTENTION_CLASSES = { "eager": StableLmAttention, "sdpa": StableLmSdpaAttention, "flash_attention_2": StableLmFlashAttention2, } class StableLmDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: StableLmConfig, layer_idx: int): super().__init__() self.use_parallel_residual = config.use_parallel_residual self.hidden_size = config.hidden_size self.self_attn = ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx) self.mlp = StableLmMLP(config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = None if not self.use_parallel_residual: self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*): Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`, with `head_dim` being the embedding dimension of each attention head. 
""" residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention self_attn_output, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) # copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXLayer.forward if self.use_parallel_residual: # x = x + attn(ln1(x)) + mlp(ln1(x)) # Fully Connected mlp_output = self.mlp(hidden_states) mlp_output = self.dropout(mlp_output) hidden_states = residual + self_attn_output + mlp_output else: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) residual = residual + self_attn_output # Fully Connected mlp_output = self.mlp(self.post_attention_layernorm(residual)) mlp_output = self.dropout(mlp_output) hidden_states = residual + mlp_output outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs @auto_docstring class StableLmPreTrainedModel(PreTrainedModel): config: StableLmConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["StableLmDecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_() @auto_docstring class StableLmModel(StableLmPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`StableLmDecoderLayer`] Args: config: StableLmConfig """ def __init__(self, config: StableLmConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [StableLmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.rotary_emb = StableLmRotaryEmbedding(config=config) self._attn_implementation = config._attn_implementation self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache if not isinstance(past_key_values, (type(None), Cache)): raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.") if use_cache and past_key_values is None: past_key_values = DynamicCache() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask def _update_causal_mask( self, attention_mask: Union[torch.Tensor, "BlockMask"], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool = False, ): if self.config._attn_implementation == "flash_attention_2": if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == "flex_attention": if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail # to infer the attention mask. 
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions: if AttentionMaskConverter._ignore_causal_mask_sdpa( attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training, ): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = ( attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 ) # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], ) if ( self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"] and not output_attentions ): # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. # Details: https://github.com/pytorch/pytorch/issues/110213 min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod # Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs, ): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. 
causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full( (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device ) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to( causal_mask.device ) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( padding_mask, min_dtype ) return causal_mask # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM with PERSIMMON->STABLELM,Persimmon->StableLm class StableLmForCausalLM(StableLmPreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->STABLELM,Llama->StableLm def __init__(self, config): super().__init__(config) self.model = StableLmModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder def set_decoder(self, decoder): self.model = decoder # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder def get_decoder(self): return self.model @can_return_tuple @auto_docstring # Ignore copy def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs, ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, StableLmForCausalLM >>> model = StableLmForCausalLM.from_pretrained("adept/persimmon-8b-base") >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base") >>> prompt = "human: Hey, what should I eat for dinner?" 
>>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, ) hidden_states = outputs.last_hidden_state # No upscaling to float was ever done for StableLm slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits, labels, vocab_size=self.config.vocab_size, **kwargs, ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class StableLmForSequenceClassification(GenericForSequenceClassification, StableLmPreTrainedModel): ... class StableLmForTokenClassification(GenericForTokenClassification, StableLmPreTrainedModel): ... __all__ = [ "StableLmForCausalLM", "StableLmModel", "StableLmPreTrainedModel", "StableLmForSequenceClassification", "StableLmForTokenClassification", ]
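The distinctive piece of the StableLM attention classes above is the *partial* rotary embedding: only the first `rotary_ndims = head_dim * partial_rotary_factor` channels of each head are rotated, and the remaining channels pass through unchanged before the two halves are concatenated back together. The standalone sketch below illustrates that split on toy tensors; the cos/sin construction here is a simplified assumption for the example rather than a call into `StableLmRotaryEmbedding`.

```python
import torch

def rotate_half(x):
    # Same helper as in the model: rotate the two halves of the last dimension.
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq, head_dim = 1, 2, 4, 8
partial_rotary_factor = 0.5                     # assumption: a toy value for illustration
rotary_ndims = int(head_dim * partial_rotary_factor)  # 4 channels get rotated, 4 pass through

q = torch.randn(batch, heads, seq, head_dim)

# Toy rotary frequencies over the rotary channels only (simplified stand-in for the model's rope init).
inv_freq = 1.0 / (10000 ** (torch.arange(0, rotary_ndims, 2).float() / rotary_ndims))
positions = torch.arange(seq).float()
freqs = torch.outer(positions, inv_freq)        # (seq, rotary_ndims // 2)
emb = torch.cat((freqs, freqs), dim=-1)         # (seq, rotary_ndims)
cos = emb.cos()[None, None]                     # broadcast over (batch, heads)
sin = emb.sin()[None, None]

# Split, rotate only the first rotary_ndims channels, then concatenate back.
q_rot, q_pass = q[..., :rotary_ndims], q[..., rotary_ndims:]
q_rot = (q_rot * cos) + (rotate_half(q_rot) * sin)
q = torch.cat((q_rot, q_pass), dim=-1)
print(q.shape)  # torch.Size([1, 2, 4, 8]) -- same shape, only half the channels were rotated
```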
transformers/src/transformers/models/stablelm/modeling_stablelm.py/0
{ "file_path": "transformers/src/transformers/models/stablelm/modeling_stablelm.py", "repo_id": "transformers", "token_count": 19804 }
538
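Before moving on, the 4D causal-mask construction in `StableLmModel._prepare_4d_causal_attention_mask_with_cache_position` above reduces to a few tensor operations. The toy sketch below reproduces that core logic (omitting the 2D padding-mask branch) for three new query tokens appended after two cached tokens, so each query may attend to every key at or before its own position.

```python
import torch

# Toy setting: 3 new query tokens, 2 tokens already in the cache -> 5 key positions in total.
sequence_length, target_length = 3, 5
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(2, 2 + sequence_length)  # positions of the new queries: [2, 3, 4]

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
# Keep the large negative value only for key positions strictly after each query position.
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :]  # (batch=1, head=1, query_len, key_len)

print((causal_mask[0, 0] == 0).int())
# tensor([[1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0],
#         [1, 1, 1, 1, 1]])
```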
# coding=utf-8 # Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Swin2SR Transformer model.""" import collections.abc import math from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, ImageSuperResolutionOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from .configuration_swin2sr import Swin2SRConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring( custom_intro=""" Swin2SR encoder's outputs, with potential hidden states and attentions. """ ) class Swin2SREncoderOutput(ModelOutput): last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None # Copied from transformers.models.swin.modeling_swin.window_partition def window_partition(input_feature, window_size): """ Partitions the given input into windows. """ batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Copied from transformers.models.swin.modeling_swin.window_reverse def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.beit.modeling_beit.drop_path def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output # Copied from transformers.models.swin.modeling_swin.SwinDropPath with Swin->Swin2SR class Swin2SRDropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" def __init__(self, drop_prob: Optional[float] = None) -> None: super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return drop_path(hidden_states, self.drop_prob, self.training) def extra_repr(self) -> str: return f"p={self.drop_prob}" class Swin2SREmbeddings(nn.Module): """ Construct the patch and optional position embeddings. """ def __init__(self, config): super().__init__() self.patch_embeddings = Swin2SRPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches if config.use_absolute_embeddings: self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) else: self.position_embeddings = None self.dropout = nn.Dropout(config.hidden_dropout_prob) self.window_size = config.window_size def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor]: embeddings, output_dimensions = self.patch_embeddings(pixel_values) if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = self.dropout(embeddings) return embeddings, output_dimensions class Swin2SRPatchEmbeddings(nn.Module): def __init__(self, config, normalize_patches=True): super().__init__() num_channels = config.embed_dim image_size, patch_size = config.image_size, config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) patches_resolution = [image_size[0] // patch_size[0], image_size[1] // patch_size[1]] self.patches_resolution = patches_resolution self.num_patches = patches_resolution[0] * patches_resolution[1] self.projection = nn.Conv2d(num_channels, config.embed_dim, kernel_size=patch_size, stride=patch_size) self.layernorm = nn.LayerNorm(config.embed_dim) if normalize_patches else None def forward(self, embeddings: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]: embeddings = self.projection(embeddings) _, _, height, width = embeddings.shape output_dimensions = (height, width) embeddings = embeddings.flatten(2).transpose(1, 2) if self.layernorm is not None: embeddings = self.layernorm(embeddings) return embeddings, output_dimensions class Swin2SRPatchUnEmbeddings(nn.Module): r"""Image to Patch Unembedding""" def __init__(self, config): super().__init__() self.embed_dim = config.embed_dim def forward(self, embeddings, x_size): batch_size, height_width, num_channels = embeddings.shape embeddings = embeddings.transpose(1, 2).view(batch_size, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C return embeddings # Copied from transformers.models.swinv2.modeling_swinv2.Swinv2PatchMerging with Swinv2->Swin2SR class Swin2SRPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. 
norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(2 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad input to be divisible by width and height, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # [batch_size, height/2 * width/2, 4*num_channels] input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # [batch_size, height/2 * width/2, 4*C] input_feature = self.reduction(input_feature) input_feature = self.norm(input_feature) return input_feature # Copied from transformers.models.swinv2.modeling_swinv2.Swinv2SelfAttention with Swinv2->Swin2SR class Swin2SRSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=[0, 0]): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.pretrained_window_size = pretrained_window_size self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.continuous_position_bias_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.int64).float() relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.int64).float() relative_coords_table = ( torch.stack(meshgrid([relative_coords_h, relative_coords_w], indexing="ij")) .permute(1, 2, 0) .contiguous() .unsqueeze(0) ) # [1, 2*window_height - 1, 2*window_width - 1, 2] if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= pretrained_window_size[0] - 1 relative_coords_table[:, :, :, 1] /= pretrained_window_size[1] - 1 elif window_size > 1: relative_coords_table[:, :, :, 0] /= self.window_size[0] - 1 relative_coords_table[:, :, :, 1] /= self.window_size[1] - 1 relative_coords_table *= 8 
# normalize to -8, 8 relative_coords_table = ( torch.sign(relative_coords_table) * torch.log2(torch.abs(relative_coords_table) + 1.0) / math.log2(8) ) # set to same dtype as mlp weight relative_coords_table = relative_coords_table.to(next(self.continuous_position_bias_mlp.parameters()).dtype) self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=False) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # cosine attention attention_scores = nn.functional.normalize(query_layer, dim=-1) @ nn.functional.normalize( key_layer, dim=-1 ).transpose(-2, -1) logit_scale = torch.clamp(self.logit_scale, max=math.log(1.0 / 0.01)).exp() attention_scores = attention_scores * logit_scale relative_position_bias_table = self.continuous_position_bias_mlp(self.relative_coords_table).view( -1, self.num_attention_heads ) # [window_height*window_width,window_height*window_width,num_attention_heads] relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) # [num_attention_heads,window_height*window_width,window_height*window_width] relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in Swin2SRModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the 
attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->Swin2SR class Swin2SRSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swinv2.modeling_swinv2.Swinv2Attention with Swinv2->Swin2SR class Swin2SRAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size, pretrained_window_size=0): super().__init__() self.self = Swin2SRSelfAttention( config=config, dim=dim, num_heads=num_heads, window_size=window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.output = Swin2SRSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->Swin2SR class Swin2SRIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # 
Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->Swin2SR class Swin2SROutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swinv2.modeling_swinv2.Swinv2Layer with Swinv2->Swin2SR class Swin2SRLayer(nn.Module): def __init__( self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0 ): super().__init__() self.input_resolution = input_resolution window_size, shift_size = self._compute_window_shift( (config.window_size, config.window_size), (shift_size, shift_size) ) self.window_size = window_size[0] self.shift_size = shift_size[0] self.attention = Swin2SRAttention( config=config, dim=dim, num_heads=num_heads, window_size=self.window_size, pretrained_window_size=pretrained_window_size if isinstance(pretrained_window_size, collections.abc.Iterable) else (pretrained_window_size, pretrained_window_size), ) self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.drop_path = Swin2SRDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.intermediate = Swin2SRIntermediate(config, dim) self.output = Swin2SROutput(config, dim) self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]: window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return window_size, shift_size def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for shifted window multihead self attention img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, torch.Tensor]: height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states # pad hidden_states to multiples of window size hidden_states = 
hidden_states.view(batch_size, height, width, channels) hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = self.layernorm_before(attention_windows) hidden_states = shortcut + self.drop_path(hidden_states) layer_output = self.intermediate(hidden_states) layer_output = self.output(layer_output) layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output)) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs class Swin2SRStage(GradientCheckpointingLayer): """ This corresponds to the Residual Swin Transformer Block (RSTB) in the original implementation. 
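Each stage runs `depth` consecutive `Swin2SRLayer`s, un-embeds the tokens back to a spatial feature map, applies the convolution selected by `config.resi_connection`, re-embeds the result, and adds the stage input back as a residual.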
""" def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, pretrained_window_size=0): super().__init__() self.config = config self.dim = dim self.layers = nn.ModuleList( [ Swin2SRLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if (i % 2 == 0) else config.window_size // 2, pretrained_window_size=pretrained_window_size, ) for i in range(depth) ] ) if config.resi_connection == "1conv": self.conv = nn.Conv2d(dim, dim, 3, 1, 1) elif config.resi_connection == "3conv": # to save parameters and memory self.conv = nn.Sequential( nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True), nn.Conv2d(dim // 4, dim, 3, 1, 1), ) self.patch_embed = Swin2SRPatchEmbeddings(config, normalize_patches=False) self.patch_unembed = Swin2SRPatchUnEmbeddings(config) def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor]: residual = hidden_states height, width = input_dimensions for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] output_dimensions = (height, width, height, width) hidden_states = self.patch_unembed(hidden_states, input_dimensions) hidden_states = self.conv(hidden_states) hidden_states, _ = self.patch_embed(hidden_states) hidden_states = hidden_states + residual stage_outputs = (hidden_states, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs class Swin2SREncoder(nn.Module): def __init__(self, config, grid_size): super().__init__() self.num_stages = len(config.depths) self.config = config dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")] self.stages = nn.ModuleList( [ Swin2SRStage( config=config, dim=config.embed_dim, input_resolution=(grid_size[0], grid_size[1]), depth=config.depths[stage_idx], num_heads=config.num_heads[stage_idx], drop_path=dpr[sum(config.depths[:stage_idx]) : sum(config.depths[: stage_idx + 1])], pretrained_window_size=0, ) for stage_idx in range(self.num_stages) ] ) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[tuple, Swin2SREncoderOutput]: all_input_dimensions = () all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if output_hidden_states: all_hidden_states += (hidden_states,) for i, stage_module in enumerate(self.stages): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = stage_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] output_dimensions = layer_outputs[1] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) all_input_dimensions += (input_dimensions,) if output_hidden_states: all_hidden_states += (hidden_states,) if output_attentions: all_self_attentions += layer_outputs[2:] if not return_dict: return tuple(v for v in [hidden_states, 
all_hidden_states, all_self_attentions] if v is not None) return Swin2SREncoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring class Swin2SRPreTrainedModel(PreTrainedModel): config: Swin2SRConfig base_model_prefix = "swin2sr" main_input_name = "pixel_values" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): torch.nn.init.trunc_normal_(module.weight.data, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @auto_docstring class Swin2SRModel(Swin2SRPreTrainedModel): def __init__(self, config): super().__init__(config) self.config = config if config.num_channels == 3 and config.num_channels_out == 3: mean = torch.tensor([0.4488, 0.4371, 0.4040]).view(1, 3, 1, 1) else: mean = torch.zeros(1, 1, 1, 1) self.register_buffer("mean", mean, persistent=False) self.img_range = config.img_range self.first_convolution = nn.Conv2d(config.num_channels, config.embed_dim, 3, 1, 1) self.embeddings = Swin2SREmbeddings(config) self.encoder = Swin2SREncoder(config, grid_size=self.embeddings.patch_embeddings.patches_resolution) self.layernorm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps) self.patch_unembed = Swin2SRPatchUnEmbeddings(config) self.conv_after_body = nn.Conv2d(config.embed_dim, config.embed_dim, 3, 1, 1) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.patch_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def pad_and_normalize(self, pixel_values): _, _, height, width = pixel_values.size() # 1. pad window_size = self.config.window_size modulo_pad_height = (window_size - height % window_size) % window_size modulo_pad_width = (window_size - width % window_size) % window_size pixel_values = nn.functional.pad(pixel_values, (0, modulo_pad_width, 0, modulo_pad_height), "reflect") # 2. 
normalize mean = self.mean.type_as(pixel_values) pixel_values = (pixel_values - mean) * self.img_range return pixel_values @auto_docstring def forward( self, pixel_values: torch.FloatTensor, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, len(self.config.depths)) _, _, height, width = pixel_values.shape # some preprocessing: padding + normalization pixel_values = self.pad_and_normalize(pixel_values) embeddings = self.first_convolution(pixel_values) embedding_output, input_dimensions = self.embeddings(embeddings) encoder_outputs = self.encoder( embedding_output, input_dimensions, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) sequence_output = self.patch_unembed(sequence_output, (height, width)) sequence_output = self.conv_after_body(sequence_output) + embeddings if not return_dict: output = (sequence_output,) + encoder_outputs[1:] return output return BaseModelOutput( last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class Upsample(nn.Module): """Upsample module. Args: scale (`int`): Scale factor. Supported scales: 2^n and 3. num_features (`int`): Channel number of intermediate features. """ def __init__(self, scale, num_features): super().__init__() self.scale = scale if (scale & (scale - 1)) == 0: # scale = 2^n for i in range(int(math.log(scale, 2))): self.add_module(f"convolution_{i}", nn.Conv2d(num_features, 4 * num_features, 3, 1, 1)) self.add_module(f"pixelshuffle_{i}", nn.PixelShuffle(2)) elif scale == 3: self.convolution = nn.Conv2d(num_features, 9 * num_features, 3, 1, 1) self.pixelshuffle = nn.PixelShuffle(3) else: raise ValueError(f"Scale {scale} is not supported. Supported scales: 2^n and 3.") def forward(self, hidden_state): if (self.scale & (self.scale - 1)) == 0: for i in range(int(math.log(self.scale, 2))): hidden_state = self.__getattr__(f"convolution_{i}")(hidden_state) hidden_state = self.__getattr__(f"pixelshuffle_{i}")(hidden_state) elif self.scale == 3: hidden_state = self.convolution(hidden_state) hidden_state = self.pixelshuffle(hidden_state) return hidden_state class UpsampleOneStep(nn.Module): """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle) Used in lightweight SR to save parameters. Args: scale (int): Scale factor. Supported scales: 2^n and 3. in_channels (int): Channel number of intermediate features. out_channels (int): Channel number of output features. 
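Example: with `scale=2` and `out_channels=3`, the convolution maps `in_channels` to 12 channels and `PixelShuffle(2)` rearranges them into a 3-channel map at twice the resolution.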
""" def __init__(self, scale, in_channels, out_channels): super().__init__() self.conv = nn.Conv2d(in_channels, (scale**2) * out_channels, 3, 1, 1) self.pixel_shuffle = nn.PixelShuffle(scale) def forward(self, x): x = self.conv(x) x = self.pixel_shuffle(x) return x class PixelShuffleUpsampler(nn.Module): def __init__(self, config, num_features): super().__init__() self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1) self.activation = nn.LeakyReLU(inplace=True) self.upsample = Upsample(config.upscale, num_features) self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1) def forward(self, sequence_output): x = self.conv_before_upsample(sequence_output) x = self.activation(x) x = self.upsample(x) x = self.final_convolution(x) return x class NearestConvUpsampler(nn.Module): def __init__(self, config, num_features): super().__init__() if config.upscale != 4: raise ValueError("The nearest+conv upsampler only supports an upscale factor of 4 at the moment.") self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1) self.activation = nn.LeakyReLU(inplace=True) self.conv_up1 = nn.Conv2d(num_features, num_features, 3, 1, 1) self.conv_up2 = nn.Conv2d(num_features, num_features, 3, 1, 1) self.conv_hr = nn.Conv2d(num_features, num_features, 3, 1, 1) self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1) self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, sequence_output): sequence_output = self.conv_before_upsample(sequence_output) sequence_output = self.activation(sequence_output) sequence_output = self.lrelu( self.conv_up1(torch.nn.functional.interpolate(sequence_output, scale_factor=2, mode="nearest")) ) sequence_output = self.lrelu( self.conv_up2(torch.nn.functional.interpolate(sequence_output, scale_factor=2, mode="nearest")) ) reconstruction = self.final_convolution(self.lrelu(self.conv_hr(sequence_output))) return reconstruction class PixelShuffleAuxUpsampler(nn.Module): def __init__(self, config, num_features): super().__init__() self.upscale = config.upscale self.conv_bicubic = nn.Conv2d(config.num_channels, num_features, 3, 1, 1) self.conv_before_upsample = nn.Conv2d(config.embed_dim, num_features, 3, 1, 1) self.activation = nn.LeakyReLU(inplace=True) self.conv_aux = nn.Conv2d(num_features, config.num_channels, 3, 1, 1) self.conv_after_aux = nn.Sequential(nn.Conv2d(3, num_features, 3, 1, 1), nn.LeakyReLU(inplace=True)) self.upsample = Upsample(config.upscale, num_features) self.final_convolution = nn.Conv2d(num_features, config.num_channels_out, 3, 1, 1) def forward(self, sequence_output, bicubic, height, width): bicubic = self.conv_bicubic(bicubic) sequence_output = self.conv_before_upsample(sequence_output) sequence_output = self.activation(sequence_output) aux = self.conv_aux(sequence_output) sequence_output = self.conv_after_aux(aux) sequence_output = ( self.upsample(sequence_output)[:, :, : height * self.upscale, : width * self.upscale] + bicubic[:, :, : height * self.upscale, : width * self.upscale] ) reconstruction = self.final_convolution(sequence_output) return reconstruction, aux @auto_docstring( custom_intro=""" Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration. 
""" ) class Swin2SRForImageSuperResolution(Swin2SRPreTrainedModel): def __init__(self, config): super().__init__(config) self.swin2sr = Swin2SRModel(config) self.upsampler = config.upsampler self.upscale = config.upscale # Upsampler num_features = 64 if self.upsampler == "pixelshuffle": self.upsample = PixelShuffleUpsampler(config, num_features) elif self.upsampler == "pixelshuffle_aux": self.upsample = PixelShuffleAuxUpsampler(config, num_features) elif self.upsampler == "pixelshuffledirect": # for lightweight SR (to save parameters) self.upsample = UpsampleOneStep(config.upscale, config.embed_dim, config.num_channels_out) elif self.upsampler == "nearest+conv": # for real-world SR (less artifacts) self.upsample = NearestConvUpsampler(config, num_features) else: # for image denoising and JPEG compression artifact reduction self.final_convolution = nn.Conv2d(config.embed_dim, config.num_channels_out, 3, 1, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, ImageSuperResolutionOutput]: r""" Example: ```python >>> import torch >>> import numpy as np >>> from PIL import Image >>> import requests >>> from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution >>> processor = AutoImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64") >>> model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64") >>> url = "https://huggingface.co/spaces/jjourney1125/swin2sr/resolve/main/samples/butterfly.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # prepare image for the model >>> inputs = processor(image, return_tensors="pt") >>> # forward pass >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy() >>> output = np.moveaxis(output, source=0, destination=-1) >>> output = (output * 255.0).round().astype(np.uint8) # float32 to uint8 >>> # you can visualize `output` with `Image.fromarray` ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict loss = None if labels is not None: raise NotImplementedError("Training is not supported at the moment") height, width = pixel_values.shape[2:] if self.config.upsampler == "pixelshuffle_aux": bicubic = nn.functional.interpolate( pixel_values, size=(height * self.upscale, width * self.upscale), mode="bicubic", align_corners=False, ) outputs = self.swin2sr( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] if self.upsampler in ["pixelshuffle", "pixelshuffledirect", "nearest+conv"]: reconstruction = self.upsample(sequence_output) elif self.upsampler == "pixelshuffle_aux": reconstruction, aux = self.upsample(sequence_output, bicubic, height, width) aux = aux / self.swin2sr.img_range + self.swin2sr.mean else: reconstruction = pixel_values + self.final_convolution(sequence_output) reconstruction = reconstruction / self.swin2sr.img_range + self.swin2sr.mean reconstruction = reconstruction[:, :, : height * self.upscale, : width * self.upscale] if not return_dict: output = (reconstruction,) + outputs[1:] return ((loss,) + output) if loss is not None else output return ImageSuperResolutionOutput( loss=loss, reconstruction=reconstruction, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["Swin2SRForImageSuperResolution", "Swin2SRModel", "Swin2SRPreTrainedModel"]
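# --- Illustrative sketch (not part of the library file above) ----------------
# How the pixel-shuffle upsampling used by `Upsample` grows spatial resolution:
# each 3x3 convolution expands channels 4x and `nn.PixelShuffle(2)` trades those
# extra channels for a 2x larger feature map. `num_features`, `scale`, and the
# input size below are assumptions chosen only for this example.
import math

import torch
import torch.nn as nn

num_features, scale = 8, 4  # hypothetical values; scale must be a power of two (or 3)
x = torch.randn(1, num_features, 16, 16)

stages = []
for _ in range(int(math.log2(scale))):
    # conv: (B, C, H, W) -> (B, 4C, H, W); pixel shuffle: (B, 4C, H, W) -> (B, C, 2H, 2W)
    stages += [nn.Conv2d(num_features, 4 * num_features, 3, 1, 1), nn.PixelShuffle(2)]
upsample = nn.Sequential(*stages)

print(upsample(x).shape)  # torch.Size([1, 8, 64, 64]): 4x larger spatially, channels unchanged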
transformers/src/transformers/models/swin2sr/modeling_swin2sr.py/0
{ "file_path": "transformers/src/transformers/models/swin2sr/modeling_swin2sr.py", "repo_id": "transformers", "token_count": 20219 }
539
# coding=utf-8 # Copyright 2021 T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax T5 model.""" import copy from typing import Callable, Optional import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_t5 import T5Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google-t5/t5-small" _CONFIG_FOR_DOC = "T5Config" remat = nn_partitioning.remat # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = jnp.zeros_like(input_ids) shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids class FlaxT5LayerNorm(nn.Module): hidden_size: int dtype: jnp.dtype = jnp.float32 eps: float = 1e-6 weight_init: Callable[..., np.ndarray] = jax.nn.initializers.ones def setup(self): self.weight = self.param("weight", self.weight_init, (self.hidden_size,)) def __call__(self, hidden_states): """ Construct a layernorm module in the T5 style; No bias and no subtraction of mean. 
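Equivalently, this is an RMSNorm: activations are divided by the root mean square of the last dimension (plus `eps`) and rescaled by the learned `weight`.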
""" # layer norm should always be calculated in float32 variance = jnp.power(hidden_states.astype("f4"), 2).mean(axis=-1, keepdims=True) hidden_states = hidden_states / jnp.sqrt(variance + self.eps) return self.weight * hidden_states class FlaxT5DenseActDense(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic=True): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states class FlaxT5DenseGatedActDense(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): wi_init_std = self.config.initializer_factor * (self.config.d_model**-0.5) wo_init_std = self.config.initializer_factor * (self.config.d_ff**-0.5) self.wi_0 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wi_1 = nn.Dense( self.config.d_ff, use_bias=False, kernel_init=jax.nn.initializers.normal(wi_init_std), dtype=self.dtype, ) self.wo = nn.Dense( self.config.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(wo_init_std), dtype=self.dtype, ) self.dropout = nn.Dropout(self.config.dropout_rate) self.act = ACT2FN[self.config.dense_act_fn] def __call__(self, hidden_states, deterministic): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.wo(hidden_states) return hidden_states class FlaxT5LayerFF(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): if self.config.is_gated_act: self.DenseReluDense = FlaxT5DenseGatedActDense(self.config, dtype=self.dtype) else: self.DenseReluDense = FlaxT5DenseActDense(self.config, dtype=self.dtype) self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__(self, hidden_states, deterministic=True): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states, deterministic=deterministic) hidden_states = hidden_states + self.dropout(forwarded_states, deterministic=deterministic) return hidden_states class FlaxT5Attention(nn.Module): config: T5Config has_relative_attention_bias: bool = False causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.relative_attention_num_buckets = self.config.relative_attention_num_buckets self.relative_attention_max_distance = self.config.relative_attention_max_distance self.d_model = self.config.d_model self.key_value_proj_dim = self.config.d_kv self.n_heads = self.config.num_heads self.dropout = self.config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim q_init_std = 
self.config.initializer_factor * ((self.inner_dim * self.key_value_proj_dim) ** -0.5) kv_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) o_init_std = self.config.initializer_factor * (self.inner_dim**-0.5) self.q = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(q_init_std), dtype=self.dtype, ) self.k = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.v = nn.Dense( self.inner_dim, use_bias=False, kernel_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) self.o = nn.Dense( self.d_model, use_bias=False, kernel_init=jax.nn.initializers.normal(o_init_std), dtype=self.dtype, ) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embed( self.relative_attention_num_buckets, self.n_heads, embedding_init=jax.nn.initializers.normal(kv_init_std), dtype=self.dtype, ) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0) * num_buckets relative_position = jnp.abs(relative_position) else: relative_position = -jnp.clip(relative_position, a_max=0) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( jnp.log(relative_position / max_exact) / jnp.log(max_distance / max_exact) * (num_buckets - max_exact) ) relative_position_if_large = jnp.clip(relative_position_if_large, a_max=num_buckets - 1) relative_buckets += jnp.where(is_small, relative_position, relative_position_if_large) return relative_buckets.astype("i4") def compute_bias(self, query_length, key_length): """Compute binned relative position bias""" context_position = jnp.arange(query_length, dtype="i4")[:, None] memory_position = jnp.arange(key_length, dtype="i4")[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_position_bucket( relative_position, bidirectional=(not self.causal), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias(relative_position_bucket) values = values.transpose((2, 0, 1))[None, :, :, :] return values def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.n_heads, self.key_value_proj_dim)) def _merge_heads(self, hidden_states): return 
hidden_states.reshape(hidden_states.shape[:2] + (self.inner_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slightly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = jax.lax.dynamic_update_slice(cached_key.value, key, indices) value = jax.lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions # that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def _create_position_bias( self, key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift ): cache_is_filled = self.causal and self.has_variable("cache", "cached_key") and (not init_cache) key_length = key_states.shape[1] query_length = key_length if cache_is_filled else query_states.shape[1] if self.has_relative_attention_bias: position_bias = self.compute_bias(query_length, key_length) elif attention_mask is not None: position_bias = jnp.zeros_like(attention_mask) else: position_bias = jnp.zeros((1, self.n_heads, query_length, key_length), dtype=self.dtype) # if key and values are already calculated, only the last query position bias should be taken if cache_is_filled: max_decoder_length = self.variables["cache"]["cached_key"].shape[1] position_bias = jax.lax.dynamic_slice( position_bias, (0, 0, causal_attention_mask_shift, 0), (1, self.n_heads, seq_length, max_decoder_length), ) return position_bias def __call__( self, hidden_states, attention_mask=None, key_value_states=None, position_bias=None, use_cache=False, output_attentions=False, deterministic=True, init_cache=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). 
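In the cross-attention case, queries are projected from `hidden_states` while keys and values are projected from `key_value_states`.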
""" batch_size, seq_length = hidden_states.shape[:2] # q, k, v projections query_states = self.q(hidden_states) # (batch_size, n_heads, seq_length, dim_per_head) key_states = self.k(hidden_states) if key_value_states is None else self.k(key_value_states) value_states = self.v(hidden_states) if key_value_states is None else self.v(key_value_states) # reshape to (batch_size, seq_length, n_heads, head_dim) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # counter-act scaling in dot_product_attention_weights function query_states *= jnp.sqrt(query_states.shape[-1]) # for fast decoding causal attention mask should be shifted causal_attention_mask_shift = ( self.variables["cache"]["cache_index"] if (self.has_variable("cache", "cached_key") and self.causal) else 0 ) # create causal attention_mask; attention_mask has to be defined when model is causal if self.causal: causal_attention_mask = make_causal_mask(attention_mask, dtype="bool") # fast decoding for generate requires special attention_mask if self.has_variable("cache", "cached_key"): max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_attention_mask = jax.lax.dynamic_slice( causal_attention_mask, (0, 0, causal_attention_mask_shift, 0), (1, 1, seq_length, max_decoder_length), ) # broadcast causal attention mask & attention mask to fit for merge causal_attention_mask = jnp.broadcast_to( causal_attention_mask, (batch_size,) + causal_attention_mask.shape[1:] ) attention_mask = jnp.broadcast_to( jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_attention_mask.shape ) attention_mask = combine_masks(attention_mask, causal_attention_mask) elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # replace masked positions with -10_000 if attention_mask is not None: mask_value = jnp.finfo(self.dtype).min attention_mask = jax.lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, mask_value).astype(self.dtype), ) if position_bias is None: # compute position bias (only for first layer) position_bias = self._create_position_bias( key_states, query_states, attention_mask, init_cache, seq_length, causal_attention_mask_shift ) if attention_mask is not None: position_bias = position_bias + attention_mask # create dropout rng dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") # Softmax(QK^T) attn_weights = dot_product_attention_weights( query_states, key_states, bias=position_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, ) # multiply with value states attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) # bring back to (batch_size, seq_length, d_model) attn_output = self._merge_heads(attn_output) # apply output matrix attn_output = self.o(attn_output) outputs = (attn_output, position_bias) if output_attentions: outputs = outputs + (attn_weights,) return outputs class FlaxT5LayerSelfAttention(nn.Module): config: T5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.SelfAttention = FlaxT5Attention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, causal=self.config.causal, dtype=self.dtype, ) self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, init_cache=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class FlaxT5LayerCrossAttention(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.EncDecAttention = FlaxT5Attention( self.config, has_relative_attention_bias=False, causal=False, dtype=self.dtype ) self.layer_norm = FlaxT5LayerNorm(self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, output_attentions=False, deterministic=True, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, attention_mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0], deterministic=deterministic) outputs = (hidden_states,) + 
attention_output[1:] # add attentions if we output them return outputs class FlaxT5Block(nn.Module): config: T5Config has_relative_attention_bias: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.causal = self.config.causal self.layer = ( FlaxT5LayerSelfAttention( self.config, has_relative_attention_bias=self.has_relative_attention_bias, name=str(0), dtype=self.dtype, ), ) feed_forward_index = 1 if self.causal: self.layer += (FlaxT5LayerCrossAttention(self.config, name=str(1), dtype=self.dtype),) feed_forward_index += 1 self.layer += (FlaxT5LayerFF(self.config, name=str(feed_forward_index), dtype=self.dtype),) def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, return_dict=True, deterministic=True, init_cache=False, ): self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) hidden_states = self_attention_outputs[0] attention_outputs = self_attention_outputs[1:] # Keep self-attention outputs and relative position weights do_cross_attention = self.causal and encoder_hidden_states is not None if do_cross_attention: cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, ) hidden_states = cross_attention_outputs[0] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[1:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states, deterministic=deterministic) outputs = (hidden_states,) outputs = outputs + attention_outputs # returns hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) return outputs class FlaxT5LayerCollection(nn.Module): config: T5Config has_relative_attention_bias: bool dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.layer = FlaxT5Block( self.config, has_relative_attention_bias=self.has_relative_attention_bias, dtype=self.dtype ) def __call__( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, deterministic=True, init_cache=False, ): return self.layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, output_attentions=output_attentions, deterministic=deterministic, init_cache=init_cache, ) class FlaxT5BlockCollection(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal if self.gradient_checkpointing: FlaxT5CheckpointLayer = remat(FlaxT5LayerCollection, static_argnums=(6, 7, 8)) self.blocks = [ FlaxT5CheckpointLayer( self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] else: self.blocks = [ FlaxT5LayerCollection( self.config, 
has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i), ) for i in range(self.config.num_layers) ] def __call__( self, hidden_states=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, deterministic: bool = True, init_cache: bool = False, ): # Prepare head mask if needed all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.causal) else None position_bias = None encoder_decoder_position_bias = None for i, layer_module in enumerate(self.blocks): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, position_bias, encoder_hidden_states, encoder_attention_mask, encoder_decoder_position_bias, output_attentions, deterministic, init_cache, ) hidden_states = layer_outputs[0] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[1] if self.causal and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2] if output_attentions: all_attentions = all_attentions + (layer_outputs[2],) if self.causal: all_cross_attentions = all_cross_attentions + (layer_outputs[4],) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class FlaxT5Stack(nn.Module): config: T5Config embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal self.block = FlaxT5BlockCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.final_layer_norm = FlaxT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) self.dropout = nn.Dropout(self.config.dropout_rate) def __call__( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, init_cache: bool = False, ): hidden_states = self.embed_tokens(input_ids) hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, deterministic=deterministic, init_cache=init_cache, ) hidden_states = outputs[0] hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) # Add last layer all_hidden_states = None if output_hidden_states: all_hidden_states = outputs.hidden_states all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: if output_hidden_states: return ( hidden_states, all_hidden_states, ) + outputs[2:] return (hidden_states,) + outputs[1:] return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=outputs.attentions, 
cross_attentions=outputs.cross_attentions, ) T5_ENCODE_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ T5_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) For training, `decoder_input_ids` should be provided. encoder_outputs (`tuple(tuple(jnp.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail. [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training). attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. encoder_outputs (`tuple(tuple(jnp.ndarray)`, *optional*): Tuple consists of (`last_hidden_state`, `optional`: *hidden_states*, `optional`: *attentions*) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(jnp.ndarray))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxT5PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = T5Config base_model_prefix = "transformer" module_class: nn.Module = None def __init__( self, config: T5Config, input_shape: tuple[int] = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_ids) args = [input_ids, attention_mask] if self.module_class not in [FlaxT5EncoderModule]: decoder_input_ids = jnp.ones_like(input_ids) decoder_attention_mask = jnp.ones_like(input_ids) args.extend([decoder_input_ids, decoder_attention_mask]) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, *args, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_input_ids: jnp.ndarray = None, decoder_attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if decoder_input_ids is None: raise ValueError( "Make sure to provide both `input_ids` and `decoder_input_ids`. `decoder_input_ids` is not passed" " here." ) # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # prepare decoder inputs if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. """ # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(T5_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=T5Config) def encode( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> text = "My friends are cool but they eat too many carbs." 
>>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_ids, attention_mask, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_ids, attention_mask, **kwargs) return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(T5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=T5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: Optional[dict] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs T5_START_DOCSTRING = r""" The T5 model was proposed in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://huggingface.co/papers/1910.10683) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`T5Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. 
""" @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-stateswithout any specific head on top.", T5_START_DOCSTRING, ) class FlaxT5Module(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.causal = False self.encoder = FlaxT5Stack( encoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxT5Stack( decoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode if needed (training, first prediction pass) encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxT5Model(FlaxT5PreTrainedModel): module_class = FlaxT5Module append_call_sample_docstring(FlaxT5Model, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) FLAX_T5_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5Model >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5Model.from_pretrained("google-t5/t5-small") >>> input_ids = tokenizer( ... "Studies have been shown that owning a dog is good for you", return_tensors="np" ... ).input_ids >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. 
>>> decoder_input_ids = model._shift_right(decoder_input_ids) >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state ``` """ overwrite_call_docstring(FlaxT5Model, T5_INPUTS_DOCSTRING + FLAX_T5_MODEL_DOCSTRING) append_replace_return_docstrings(FlaxT5Model, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states without any specific head on top.", T5_START_DOCSTRING, ) class FlaxT5EncoderModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor * 1.0), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) encoder_config.is_decoder = False encoder_config.is_encoder_decoder = False encoder_config.causal = False self.encoder = FlaxT5Stack( encoder_config, embed_tokens=self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, input_ids=None, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict: bool = True, deterministic: bool = True, ): # Encode if needed (training, first prediction pass) encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) return encoder_outputs class FlaxT5EncoderModel(FlaxT5PreTrainedModel): module_class = FlaxT5EncoderModule @add_start_docstrings_to_model_forward(T5_ENCODE_INPUTS_DOCSTRING) def __call__( self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare encoder inputs if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_ids=jnp.array(input_ids, dtype="i4"), attention_mask=jnp.array(attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top.""", T5_START_DOCSTRING) class FlaxT5ForConditionalGenerationModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder def setup(self): self.model_dim = self.config.d_model self.shared = nn.Embed( self.config.vocab_size, self.config.d_model, embedding_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) encoder_config = copy.deepcopy(self.config) 
encoder_config.causal = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = FlaxT5Stack( encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = self.config.num_decoder_layers self.decoder = FlaxT5Stack( decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_factor), dtype=self.dtype, ) def __call__( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None, deterministic: bool = True, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Encode encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = encoder_outputs[0] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) if self.config.tie_word_embeddings: shared_embedding = self.shared.variables["params"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = self.lm_head(sequence_output) if not return_dict: return (lm_logits,) + decoder_outputs[1:] + encoder_outputs return FlaxSeq2SeqLMOutput( logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) class FlaxT5ForConditionalGeneration(FlaxT5PreTrainedModel): module_class = FlaxT5ForConditionalGenerationModule @add_start_docstrings(T5_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=T5Config) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, past_key_values: Optional[dict] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> import jax.numpy as jnp >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = 
FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> text = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxT5Attention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs): decoder_module = module._get_decoder_module() decoder_outputs = decoder_module( decoder_input_ids, decoder_attention_mask, **kwargs, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.config.d_model**-0.5) if self.config.tie_word_embeddings: shared_embedding = module.shared.variables["params"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, sequence_output) else: lm_logits = module.lm_head(sequence_output) return lm_logits, decoder_outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + 
(unfreeze(past["cache"]),) + outputs[1:] return outputs def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. # Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: extended_attention_mask = jax.lax.dynamic_update_slice( extended_attention_mask, decoder_attention_mask, (0, 0) ) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values return model_kwargs FLAX_T5_CONDITIONAL_GENERATION_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") >>> ARTICLE_TO_SUMMARIZE = "summarize: My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], return_tensors="np") >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"]).sequences >>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)) ``` """ overwrite_call_docstring( FlaxT5ForConditionalGeneration, T5_INPUTS_DOCSTRING + FLAX_T5_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxT5ForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) __all__ = ["FlaxT5EncoderModel", "FlaxT5ForConditionalGeneration", "FlaxT5Model", "FlaxT5PreTrainedModel"]
transformers/src/transformers/models/t5/modeling_flax_t5.py/0
{ "file_path": "transformers/src/transformers/models/t5/modeling_flax_t5.py", "repo_id": "transformers", "token_count": 32767 }
540
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert TAPAS checkpoint.""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch( task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file config = TapasConfig.from_json_file(tapas_config_file) # set absolute/relative position embeddings parameter config.reset_position_index_per_cell = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": model = TapasForQuestionAnswering(config=config) elif task == "WTQ": # run_task_main.py hparams config.num_aggregation_labels = 4 config.use_answer_as_supervision = True # hparam_utils.py hparams config.answer_loss_cutoff = 0.664694 config.cell_selection_preference = 0.207951 config.huber_loss_delta = 0.121194 config.init_cell_selection_weights_to_zero = True config.select_one_column = True config.allow_empty_column_selection = False config.temperature = 0.0352513 model = TapasForQuestionAnswering(config=config) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams config.num_aggregation_labels = 4 config.use_answer_as_supervision = False # hparam_utils.py hparams config.answer_loss_cutoff = 36.4519 config.cell_selection_preference = 0.903421 config.huber_loss_delta = 222.088 config.init_cell_selection_weights_to_zero = True config.select_one_column = True config.allow_empty_column_selection = True config.temperature = 0.763141 model = TapasForQuestionAnswering(config=config) elif task == "TABFACT": model = TapasForSequenceClassification(config=config) elif task == "MLM": model = TapasForMaskedLM(config=config) elif task == "INTERMEDIATE_PRETRAINING": model = TapasModel(config=config) else: raise ValueError(f"Task {task} not supported.") print(f"Building PyTorch model from configuration: {config}") # Load weights from tf checkpoint load_tf_weights_in_tapas(model, config, tf_checkpoint_path) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}") tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512) tokenizer.save_pretrained(pytorch_dump_path) print("Used relative position embeddings:", model.config.reset_position_index_per_cell) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task 
for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
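
# Editor's note: the block below is an illustrative sketch, not part of the
# original conversion script. It shows the same conversion invoked
# programmatically instead of through the CLI; the checkpoint, config and
# output paths are placeholders, not real files.
def _example_convert_wtq_checkpoint():
    convert_tf_checkpoint_to_pytorch(
        task="WTQ",
        reset_position_index_per_cell=True,  # use relative position embeddings
        tf_checkpoint_path="/path/to/tapas_wtq_checkpoint/model.ckpt",
        tapas_config_file="/path/to/tapas_wtq_checkpoint/bert_config.json",
        pytorch_dump_path="/path/to/tapas_wtq_pytorch",
    )
    # The dump directory can afterwards be loaded with the regular classes, e.g.
    # TapasForQuestionAnswering.from_pretrained("/path/to/tapas_wtq_pytorch").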
transformers/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 1935 }
541
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/timesfm/modular_timesfm.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_timesfm.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2025 Google LLC and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Sequence from dataclasses import dataclass from typing import Callable, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from ...integrations import use_kernel_forward_from_hub from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging from .configuration_timesfm import TimesFmConfig logger = logging.get_logger(__name__) @dataclass @auto_docstring class TimesFmOutput(BaseModelOutput): r""" loc (`torch.Tensor` of shape `(batch_size, )`): The mean of the time series inputs. scale (`torch.Tensor` of shape `(batch_size,)`): The scale of the time series inputs. """ loc: Optional[torch.Tensor] = None scale: Optional[torch.Tensor] = None @dataclass @auto_docstring class TimesFmOutputForPrediction(BaseModelOutput): r""" mean_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`): The mean predictions of the time series. full_predictions (`torch.Tensor` of shape `(batch_size, sequence_length)`): The full predictions of the time series including the mean and the quantiles. loss (`torch.Tensor` of shape `(1,)`, *optional*, returned when `future_values` is provided): The loss of the TimesFM model. 
""" mean_predictions: Optional[torch.Tensor] = None full_predictions: Optional[torch.Tensor] = None loss: Optional[Union[torch.Tensor, float]] = None class TimesFmMLP(nn.Module): """Pax MLP in pytorch.""" def __init__(self, config: TimesFmConfig): super().__init__() hidden_size = config.hidden_size intermediate_size = config.intermediate_size self.gate_proj = nn.Linear(hidden_size, intermediate_size) self.down_proj = nn.Linear(intermediate_size, hidden_size) self.layer_norm = nn.LayerNorm(normalized_shape=hidden_size, eps=1e-6) def forward(self, x, paddings=None): gate_inp = self.layer_norm(x) gate = self.gate_proj(gate_inp) gate = F.relu(gate) outputs = self.down_proj(gate) if paddings is not None: outputs = outputs * (1.0 - paddings[:, :, None]) return outputs + x class TimesFmResidualBlock(nn.Module): """TimesFM residual block.""" def __init__(self, input_dims, hidden_dims, output_dims): super().__init__() self.input_dims = input_dims self.hidden_dims = hidden_dims self.output_dims = output_dims self.input_layer = nn.Linear(input_dims, hidden_dims) self.activation = nn.SiLU() self.output_layer = nn.Linear(hidden_dims, output_dims) self.residual_layer = nn.Linear(input_dims, output_dims) def forward(self, x): hidden = self.input_layer(x) hidden = self.activation(hidden) output = self.output_layer(hidden) residual = self.residual_layer(x) return output + residual @use_kernel_forward_from_hub("RMSNorm") class TimesFmRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ TimesFmRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) def extra_repr(self): return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" class TimesFmPositionalEmbedding(nn.Module): """Generates position embedding for a given 1-d sequence.""" def __init__(self, config: TimesFmConfig): super().__init__() min_timescale = config.min_timescale max_timescale = config.max_timescale self.embedding_dims = config.hidden_size num_timescales = self.embedding_dims // 2 log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / max(num_timescales - 1, 1) self.register_buffer( "inv_timescales", min_timescale * torch.exp(torch.arange(num_timescales, dtype=torch.float32) * -log_timescale_increment), ) def forward(self, seq_length=None, position=None): """Generates a Tensor of sinusoids with different frequencies. Args: seq_length: an optional Python int defining the output sequence length. if the `position` argument is specified. position: [B, seq_length], optional position for each token in the sequence, only required when the sequence is packed. 
Returns: [B, seqlen, D] if `position` is specified, else [1, seqlen, D] """ if position is None and seq_length is None: raise ValueError("Either position or seq_length must be provided") if position is None: # [1, seqlen] position = torch.arange(seq_length, dtype=torch.float32, device=self.inv_timescales.device).unsqueeze(0) elif position.ndim != 2: raise ValueError(f"position must be 2-dimensional, got shape {position.shape}") scaled_time = position.view(*position.shape, 1) * self.inv_timescales.view(1, 1, -1) signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2) # Padding to ensure correct embedding dimension signal = F.pad(signal, (0, 0, 0, self.embedding_dims % 2)) return signal def simple_eager_attention_forward( module: nn.Module, query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights class TimesFmAttention(nn.Module): """Implements the attention used in TimesFM. One key difference is that there is _per_dim_scaling of the query.""" def __init__(self, config: TimesFmConfig, layer_idx: int): super().__init__() self.config = config self.is_causal = True self.attention_dropout = config.attention_dropout self.layer_idx = layer_idx self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_dim = config.head_dim self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_heads * self.head_dim self.scaling = nn.Parameter(torch.empty((self.head_dim,))) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim) self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim) self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size) def _scale_query(self, query: torch.Tensor) -> torch.Tensor: scale = F.softplus(self.scaling).mul(1.442695041 / math.sqrt(self.head_dim)) return query * scale[None, None, None, :] def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) query_states = self._scale_query(query_states) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) attention_interface: Callable = simple_eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=1.0, **kwargs, ) 
attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights class TimesFmDecoderLayer(nn.Module): """Transformer layer.""" def __init__(self, config: TimesFmConfig, layer_idx: int): super().__init__() self.self_attn = TimesFmAttention(config, layer_idx=layer_idx) self.mlp = TimesFmMLP(config) self.input_layernorm = TimesFmRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, paddings: torch.Tensor, output_attentions: bool = False, ) -> tuple[Optional[torch.Tensor], torch.Tensor]: # Self Attention residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, scores = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states # MLP hidden_states = self.mlp(hidden_states, paddings=paddings) return scores, hidden_states @auto_docstring class TimesFmPreTrainedModel(PreTrainedModel): config: TimesFmConfig base_model_prefix = "timesfm" _no_split_modules = ["TimesFmDecoderLayer"] main_input_name = "past_values" _supports_sdpa = True def _init_weights(self, module): super()._init_weights(module) if isinstance(module, TimesFmAttention): # Initialize scaling parameter nn.init.ones_(module.scaling) @auto_docstring class TimesFmModel(TimesFmPreTrainedModel): def __init__(self, config: TimesFmConfig): super().__init__(config) self.config = config self.input_ff_layer = TimesFmResidualBlock( input_dims=2 * config.patch_length, output_dims=config.hidden_size, hidden_dims=config.intermediate_size, ) self.freq_emb = nn.Embedding(num_embeddings=config.freq_size, embedding_dim=config.hidden_size) self.layers = nn.ModuleList( [TimesFmDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) if self.config.use_positional_embedding: self.position_emb = TimesFmPositionalEmbedding(config=config) # Initialize weights and apply final processing self.post_init() def _forward_transform( self, inputs: torch.Tensor, patched_pads: torch.Tensor ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]: """Input is of shape [B, N, P].""" mu, sigma = self._timesfm_masked_mean_std(inputs, patched_pads) sigma = torch.where( sigma < self.config.tolerance, torch.tensor(1.0, dtype=sigma.dtype, device=sigma.device), sigma, ) # Normalize each patch outputs = (inputs - mu[:, None, None]) / sigma[:, None, None] outputs = torch.where( torch.abs(inputs - self.config.pad_val) < self.config.tolerance, torch.tensor(self.config.pad_val, dtype=outputs.dtype, device=outputs.device), outputs, ) return outputs, (mu, sigma) @can_return_tuple @auto_docstring def forward( self, past_values: torch.Tensor, past_values_padding: torch.LongTensor, freq: torch.Tensor, output_attentions: bool = False, output_hidden_states: bool = False, ) -> TimesFmOutput: r""" past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Past values of the time series that serves as input to the model. past_values_padding (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The padding indicator of the time series. freq (`torch.LongTensor` of shape `(batch_size,)`): Frequency indices for the time series data. 
""" # Reshape into patches (using view for efficiency) bsize = past_values.shape[0] patched_inputs = past_values.view(bsize, -1, self.config.patch_length) patched_pads = past_values_padding.view(bsize, -1, self.config.patch_length) patched_inputs = torch.where( torch.abs(patched_pads - 1.0) < self.config.tolerance, torch.tensor(0.0, dtype=patched_inputs.dtype, device=patched_inputs.device), patched_inputs, ) patched_pads = torch.where( torch.abs(patched_inputs - self.config.pad_val) < self.config.tolerance, torch.tensor(1.0, dtype=patched_pads.dtype, device=patched_pads.device), patched_pads, ) patched_inputs, stats = self._forward_transform(patched_inputs, patched_pads) # B x N x D patched_inputs = patched_inputs * (1.0 - patched_pads) concat_inputs = torch.cat([patched_inputs, patched_pads], dim=-1) model_input = self.input_ff_layer(concat_inputs) # A patch should not be padded even if there is at least one zero. patched_padding = torch.min(patched_pads, dim=-1)[0] # Get the values from the min result if self.config.use_positional_embedding: pos_emb = self.position_emb(model_input.shape[1]) pos_emb = torch.concat([pos_emb] * model_input.shape[0], dim=0) pos_emb = self._timesfm_shift_padded_seq(patched_padding, pos_emb) model_input += pos_emb f_emb = self.freq_emb(freq) # B x 1 x D model_input += f_emb # Convert paddings to attention mask and combine with causal mask hidden_states = model_input attention_mask = self._prepare_4d_attention_mask( attention_mask=patched_padding, sequence_length=hidden_states.shape[1], dtype=hidden_states.dtype, device=hidden_states.device, is_causal=True, ) all_attentions = [] all_hidden_states = [] for layer in self.layers[: self.config.num_hidden_layers]: scores, hidden_states = layer( hidden_states=hidden_states, attention_mask=attention_mask, paddings=patched_padding, output_attentions=output_attentions, ) if output_attentions: all_attentions.append(scores) if output_hidden_states: all_hidden_states.append(hidden_states) if output_hidden_states: all_hidden_states = [model_input] + all_hidden_states else: all_hidden_states = None return TimesFmOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions if output_attentions else None, loc=stats[0], scale=stats[1], ) @staticmethod def _prepare_4d_attention_mask( attention_mask: Optional[torch.Tensor], sequence_length: int, dtype: torch.dtype, device: torch.device, is_causal: bool = True, ) -> Optional[torch.Tensor]: """ Creates 4D attention mask and combines causal and padding masks if needed. 
Args: attention_mask: Optional tensor of shape (batch_size, seq_length) containing padding mask sequence_length: Length of the sequence dtype: Data type of the mask device: Device of the mask is_causal: Whether to apply causal masking Returns: 4D attention mask of shape (batch_size, 1, seq_length, seq_length) """ # Get minimum value for the dtype min_value = torch.finfo(dtype).min if dtype.is_floating_point else torch.iinfo(dtype).min # Handle padding mask if attention_mask is not None: # Convert 2D padding mask to 4D attention mask attention_mask = attention_mask.view(attention_mask.shape[0], 1, 1, -1) attention_mask = attention_mask * min_value # Create causal mask if needed if is_causal: causal_mask = torch.triu( torch.ones((sequence_length, sequence_length), dtype=dtype, device=device) * min_value, diagonal=1, ) causal_mask = causal_mask.view(1, 1, sequence_length, sequence_length) # Combine with padding mask if it exists if attention_mask is not None: attention_mask = torch.minimum(attention_mask, causal_mask) else: attention_mask = causal_mask return attention_mask @staticmethod def _timesfm_masked_mean_std(inputs: torch.Tensor, padding: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Calculates mean and standard deviation of `inputs` across axis 1. It excludes values where `padding` is 1. Args: inputs: A PyTorch tensor of shape [b, n, p]. padding: A PyTorch tensor of shape [b, n, p] with values 0 or 1. Returns: A tuple containing the mean and standard deviation. We return the statistics of the first patch with more than three non-padded values. """ # Selecting the first patch with more than 3 unpadded values. def _get_patch_index(arr: torch.Tensor): indices = torch.argmax((arr >= 3).to(torch.int32), dim=1) row_sum = (arr >= 3).to(torch.int32).sum(dim=1) return torch.where(row_sum == 0, arr.shape[1] - 1, indices) pad_sum = torch.sum(1 - padding, dim=2) patch_indices = _get_patch_index(pad_sum) bidxs = torch.arange(inputs.shape[0]) arr = inputs[bidxs, patch_indices, :] pad = padding[bidxs, patch_indices, :] # Create a mask where padding is 0 mask = 1 - pad # Calculate the number of valid elements num_valid_elements = torch.sum(mask, dim=1) num_valid_elements = torch.where( num_valid_elements == 0, torch.tensor(1, dtype=num_valid_elements.dtype, device=num_valid_elements.device), num_valid_elements, ) # Calculate the masked sum and squared sum masked_sum = torch.sum(arr * mask, dim=1) masked_squared_sum = torch.sum((arr * mask) ** 2, dim=1) # Calculate the masked mean and standard deviation masked_mean = masked_sum / num_valid_elements masked_var = masked_squared_sum / num_valid_elements - masked_mean**2 masked_var = torch.where( masked_var < 0.0, torch.tensor(0.0, dtype=masked_var.dtype, device=masked_var.device), masked_var, ) masked_std = torch.sqrt(masked_var) return masked_mean, masked_std @staticmethod def _timesfm_shift_padded_seq(mask: torch.Tensor, seq: torch.Tensor) -> torch.Tensor: """Shifts rows of seq based on the first 0 in each row of the mask. Args: mask: mask tensor of shape [B, N] seq: seq tensor of shape [B, N, P] Returns: The shifted sequence. 
""" batch_size, num_seq, feature_dim = seq.shape new_mask: torch.BoolTensor = mask == 0 # Use argmax to find the first True value in each row indices = new_mask.to(torch.int32).argmax(dim=1) # Handle rows with all zeros indices[~new_mask.any(dim=1)] = -1 # Create index ranges for each sequence in the batch idx_range = torch.arange(num_seq, device=seq.device).view(1, -1, 1).expand(batch_size, -1, feature_dim) # Calculate shifted indices for each element in each sequence shifted_idx = (idx_range - indices[:, None, None]) % num_seq # Gather values from seq using shifted indices shifted_seq = seq.gather(1, shifted_idx) return shifted_seq class TimesFmModelForPrediction(TimesFmPreTrainedModel): """TimesFM model for quantile and mean prediction.""" def __init__(self, config: TimesFmConfig): super().__init__(config) self.config = config self.context_len = config.context_length self.horizon_len = config.horizon_length self.decoder = TimesFmModel(config) # quantile and mean output self.horizon_ff_layer = TimesFmResidualBlock( input_dims=config.hidden_size, output_dims=config.horizon_length * (1 + len(config.quantiles)), hidden_dims=config.intermediate_size, ) # Initialize weights and apply final processing self.post_init() def _preprocess( self, inputs: Sequence[torch.Tensor], freq: Sequence[int] ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Formats and pads raw inputs to feed into the model. This function both pads each time series to match the context length, and pads the inputs to meet the SPMD shape requirement. Args: inputs: A list of 1d Tensors. Each Tensor is the context time series of a single forecast task. freq: list of frequencies Returns: A tuple of: - the padded input time series to meet the model required context. - the padding indicator. - the number of padded examples for SPMD so that each core has the same number (a multiple of `batch_size`) of examples. 
""" input_ts, input_padding, inp_freq = [], [], [] for i, ts in enumerate(inputs): input_len = ts.shape[0] padding = torch.zeros(input_len + self.horizon_len, dtype=ts.dtype, device=ts.device) if input_len < self.context_len: num_front_pad = self.context_len - input_len ts = torch.cat([torch.zeros(num_front_pad, dtype=ts.dtype, device=ts.device), ts], dim=0) padding = torch.cat([torch.ones(num_front_pad, dtype=ts.dtype, device=padding.device), padding], dim=0) elif input_len > self.context_len: ts = ts[-self.context_len :] padding = padding[-(self.context_len + self.horizon_len) :] input_ts.append(ts) input_padding.append(padding) inp_freq.append(freq[i]) return ( torch.stack(input_ts, dim=0), torch.stack(input_padding, dim=0), torch.tensor(inp_freq, dtype=torch.int32).reshape(-1, 1), ) def _postprocess_output( self, model_output: torch.Tensor, stats: tuple[torch.Tensor, torch.Tensor] ) -> torch.Tensor: """Postprocess output of stacked transformer.""" # B x N x (H.Q) output_ts = self.horizon_ff_layer(model_output) # Reshape using view b, n, _ = output_ts.shape output_ts = output_ts.view(b, n, self.config.horizon_length, len(self.config.quantiles) + 1) mu, sigma = stats return output_ts * sigma[:, None, None, None] + mu[:, None, None, None] def _quantile_loss(self, predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor: losses = [] for i, q in enumerate(self.config.quantiles): errors = targets - predictions[..., i] loss = torch.max((q - 1) * errors, q * errors) losses.append(loss.mean()) return torch.stack(losses).mean() @can_return_tuple @auto_docstring def forward( self, past_values: Sequence[torch.Tensor], freq: Optional[Sequence[Union[torch.Tensor, int]]] = None, window_size: Optional[int] = None, future_values: Optional[torch.Tensor] = None, forecast_context_len: Optional[int] = None, return_forecast_on_context: bool = False, truncate_negative: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> TimesFmOutputForPrediction: r""" past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Past values of the time series that serves as input to the model. freq (`torch.LongTensor` of shape `(batch_size,)`): Frequency indices for the time series data. window_size (`int`, *optional*): Window size of trend + residual decomposition. If None then we do not do decomposition. future_values (`torch.Tensor`, *optional*): Optional future time series values to be used for loss computation. forecast_context_len (`int`, *optional*): Optional max context length. return_forecast_on_context (`bool`, *optional*): True to return the forecast on the context when available, i.e. after the first input patch. truncate_negative (`bool`, *optional*): Truncate to only non-negative values if any of the contexts have non-negative values, otherwise do nothing. output_attentions (`bool`, *optional*): Whether to output the attentions. output_hidden_states (`bool`, *optional*): Whether to output the hidden states. 
Example: ```python >>> from transformers import TimesFmModelForPrediction >>> model = TimesFmModelForPrediction.from_pretrained("google/timesfm-2.0-500m-pytorch") >>> forecast_input = [torch.linspace(0, 20, 100).sin(), torch.linspace(0, 20, 200).sin(), torch.linspace(0, 20, 400).sin()] >>> frequency_input = torch.tensor([0, 1, 2], dtype=torch.long) >>> # Generate >>> with torch.no_grad(): >>> outputs = model(past_values=forecast_input, freq=frequency_input, return_dict=True) >>> point_forecast_conv = outputs.mean_predictions >>> quantile_forecast_conv = outputs.full_predictions ``` """ if forecast_context_len is None: fcontext_len = self.context_len else: fcontext_len = forecast_context_len # Get device from first input tensor device = past_values[0].device # Truncate inputs to forecast_context_len inputs = [ts[-fcontext_len:] for ts in past_values] inp_min = torch.min(torch.stack([torch.min(ts) for ts in inputs])) if window_size is not None: new_inputs = [] new_freqs = [] for i, ts in enumerate(inputs): new_inputs.extend(self._timesfm_moving_average(ts, window_size)) if freq is not None: new_freqs.extend([freq[i]] * 2) inputs = new_inputs if freq is not None: freq = new_freqs if freq is None: logger.info("No frequency provided via `freq`. Default to high (0).") freq = [0] * len(inputs) if output_attentions is None: output_attentions = self.config.output_attentions if output_hidden_states is None: output_hidden_states = self.config.output_hidden_states input_ts, input_padding, inp_freq = self._preprocess(inputs, freq) # Move tensors to the same device as input input_ts = input_ts.to(device) input_padding = input_padding.to(device) inp_freq = inp_freq.to(device) final_out = input_ts context_len = final_out.shape[1] full_outputs = [] if input_padding.shape[1] != final_out.shape[1] + self.horizon_len: raise ValueError( "Length of paddings must match length of input + horizon_len:" f" {input_padding.shape[1]} != {final_out.shape[1]} + {self.horizon_len}" ) output_patch_len = self.config.horizon_length num_decode_patches = (self.horizon_len + output_patch_len - 1) // output_patch_len for step_index in range(num_decode_patches): current_padding = input_padding[:, 0 : final_out.shape[1]] input_ts = final_out[:, -fcontext_len:] input_padding = current_padding[:, -fcontext_len:] decoder_output = self.decoder( past_values=input_ts, past_values_padding=input_padding, freq=inp_freq, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) fprop_outputs = self._postprocess_output( decoder_output.last_hidden_state, (decoder_output.loc, decoder_output.scale), ) if return_forecast_on_context and step_index == 0: # For the first decodings step, collect the model forecast on the # context except the unavailable first input batch forecast. new_full_ts = fprop_outputs[:, :-1, : self.config.patch_length, :] # We have to use reshape and not view for non-contiguous memory new_full_ts = new_full_ts.reshape(new_full_ts.size(0), -1, new_full_ts.size(3)) full_outputs.append(new_full_ts) # (full batch, last patch, output_patch_len, index of mean forecast = 0) new_ts = fprop_outputs[:, -1, :output_patch_len, 0] new_full_ts = fprop_outputs[:, -1, :output_patch_len, :] # (full batch, last patch, output_patch_len, all output indices) full_outputs.append(new_full_ts) final_out = torch.concatenate([final_out, new_ts], axis=-1) if return_forecast_on_context: # `full_outputs` indexing starts at after the first input patch. 
full_outputs = torch.concatenate(full_outputs, axis=1)[ :, : (context_len - self.config.patch_length + self.horizon_len), : ] else: # `full_outputs` indexing starts at the forecast horizon. full_outputs = torch.concatenate(full_outputs, axis=1)[:, 0 : self.horizon_len, :] mean_outputs = full_outputs[:, :, 0] if window_size is not None: mean_outputs = mean_outputs[0::2, ...] + mean_outputs[1::2, ...] full_outputs = full_outputs[0::2, ...] + full_outputs[1::2, ...] if inp_min >= 0 and truncate_negative: mean_outputs = torch.maximum(mean_outputs, 0.0) full_outputs = torch.maximum(full_outputs, 0.0) loss = None if future_values is not None: mse_loss = F.mse_loss(mean_outputs, future_values) quantile_loss = self._quantile_loss(full_outputs[:, :, 1:], future_values) loss = mse_loss + quantile_loss return TimesFmOutputForPrediction( last_hidden_state=decoder_output.last_hidden_state, attentions=decoder_output.attentions if output_attentions else None, hidden_states=decoder_output.hidden_states if output_hidden_states else None, mean_predictions=mean_outputs, full_predictions=full_outputs, loss=loss, ) @staticmethod def _timesfm_moving_average(arr: torch.Tensor, window_size: int) -> list[torch.Tensor]: """Calculates the moving average using PyTorch's convolution function.""" # Pad with zeros to handle initial window positions arr_padded = F.pad(arr, (window_size - 1, 0), "constant", 0) # Create a convolution kernel kernel = torch.ones(window_size, dtype=arr.dtype, device=arr.device) / window_size # Apply convolution to calculate the moving average smoothed_arr = F.conv1d(arr_padded.view(1, 1, -1), kernel.view(1, 1, -1)).squeeze() return [smoothed_arr, arr - smoothed_arr] __all__ = ["TimesFmModelForPrediction", "TimesFmPreTrainedModel", "TimesFmModel"]
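
# Editor's note: the block below is an illustrative sketch, not part of the
# original modeling_timesfm.py. It forecasts a small batch of synthetic series
# and computes the combined MSE + quantile loss against held-out future values.
# The checkpoint name is the one already used in the forward() docstring; the
# sine-wave data and the helper name are purely illustrative.
def _example_timesfm_forecast_with_loss():
    model = TimesFmModelForPrediction.from_pretrained("google/timesfm-2.0-500m-pytorch")
    model.eval()

    horizon = model.config.horizon_length
    # Contexts may have different lengths; _preprocess pads or truncates them.
    past_values = [
        torch.linspace(0, 20, 100).sin(),
        torch.linspace(0, 20, 200).sin(),
    ]
    freq = torch.tensor([0, 0], dtype=torch.long)  # 0 = highest-frequency bucket
    future_values = torch.stack([torch.linspace(20, 25, horizon).sin()] * len(past_values))

    with torch.no_grad():
        outputs = model(past_values=past_values, freq=freq, future_values=future_values)

    print(outputs.mean_predictions.shape)  # (batch, horizon): mean forecast
    print(outputs.full_predictions.shape)  # (batch, horizon, 1 + number of quantiles)
    print(outputs.loss)  # MSE on the mean forecast + pinball loss on the quantiles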
transformers/src/transformers/models/timesfm/modeling_timesfm.py/0
{ "file_path": "transformers/src/transformers/models/timesfm/modeling_timesfm.py", "repo_id": "transformers", "token_count": 15188 }
542
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TrOCR decoder model (based on RoBERTa).""" import math from typing import Optional, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging from ...utils.deprecation import deprecate_kwarg from .configuration_trocr import TrOCRConfig logger = logging.get_logger(__name__) # Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->TrOCR class TrOCRLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): # TrOCR is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models don't have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: torch.Tensor = None): """`input_ids' shape is expected to be [bsz x seqlen].""" if position_ids is None: bsz, seq_len = input_ids.shape[:2] position_ids = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ).expand(bsz, -1) else: position_ids = position_ids.unsqueeze(0) return super().forward(position_ids + self.offset) # Copied from transformers.models.bart.modeling_bart.BartScaledWordEmbedding with Bart->TrOCR class TrOCRScaledWordEmbedding(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. 
""" def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale class TrOCRSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.weights = self.get_embedding(num_positions, embedding_dim, padding_idx) self.register_buffer("_float_tensor", torch.FloatTensor(1)) @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb) emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb.to(torch.get_default_dtype()) @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if self.weights is None or max_pos > self.weights.size(0): # recompute/expand embeddings if needed self.weights = self.get_embedding(max_pos, self.embedding_dim, self.padding_idx) self.weights = self.weights.to(self._float_tensor) x = self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() return x def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. 
mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx class TrOCRAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper.""" def __init__( self, config, embed_dim: int, num_heads: int, kdim: Optional[int] = None, vdim: Optional[int] = None, dropout: Optional[float] = 0.0, is_decoder: Optional[bool] = False, bias: Optional[bool] = True, is_cross_attention: Optional[bool] = False, layer_idx: Optional[bool] = None, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if not (self.head_dim * num_heads == self.embed_dim): raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.layer_idx = layer_idx self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias) self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, cache_position: Optional[torch.Tensor] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling if past_key_values is not None: if isinstance(past_key_values, EncoderDecoderCache): is_updated = past_key_values.is_updated.get(self.layer_idx) if is_cross_attention: # after the first generated id, we can subsequently re-use all key/value_states from cache curr_past_key_value = past_key_values.cross_attention_cache else: curr_past_key_value = past_key_values.self_attention_cache else: curr_past_key_value = past_key_values current_states = key_value_states if is_cross_attention else hidden_states if is_cross_attention and past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = curr_past_key_value.layers[self.layer_idx].keys value_states = curr_past_key_value.layers[self.layer_idx].values else: key_states = self.k_proj(current_states) value_states = self.v_proj(current_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: # save all key/value_states to cache to be re-used for fast auto-regressive generation cache_position = cache_position if not is_cross_attention else None key_states, value_states = curr_past_key_value.update( key_states, value_states, self.layer_idx, {"cache_position": cache_position} ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls if 
is_cross_attention: past_key_values.is_updated[self.layer_idx] = True proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2) query_states = query_states.reshape(*proj_shape) key_states = key_states.reshape(*proj_shape) value_states = value_states.reshape(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class TrOCRDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: TrOCRConfig, layer_idx=None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = TrOCRAttention( config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, layer_idx=layer_idx, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) if config.is_decoder: self.encoder_attn = TrOCRAttention( config, embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, kdim=config.cross_attention_hidden_size, vdim=config.cross_attention_hidden_size, dropout=config.attention_dropout, is_decoder=True, is_cross_attention=True, layer_idx=layer_idx, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) 
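        # Position-wise feed-forward sub-block (hidden_size -> decoder_ffn_dim -> hidden_size);
        # in forward() its output is added back to the residual stream and normalized by
        # final_layer_norm, mirroring the post-LayerNorm layout used for the attention blocks.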
self.final_layer_norm = nn.LayerNorm(self.embed_dim) @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, cache_position: Optional[torch.Tensor] = None, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size *(decoder_attention_heads,)*. past_key_values (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs @auto_docstring class TrOCRPreTrainedModel(PreTrainedModel): config: TrOCRConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["TrOCRDecoderLayer"] def _init_weights(self, 
module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() class TrOCRDecoder(TrOCRPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TrOCRDecoderLayer`] Args: config: TrOCRConfig """ def __init__(self, config: TrOCRConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0 self.embed_tokens = TrOCRScaledWordEmbedding( config.vocab_size, config.hidden_size, self.padding_idx, embed_scale=embed_scale ) if config.use_learned_position_embeddings: self.embed_positions = TrOCRLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) else: self.embed_positions = TrOCRSinusoidalPositionalEmbedding( config.max_position_embeddings + self.padding_idx + 1, config.hidden_size, self.padding_idx, ) if config.layernorm_embedding: self.layernorm_embedding = nn.LayerNorm(config.hidden_size) else: self.layernorm_embedding = None self.layers = nn.ModuleList([TrOCRDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_ids = input_ids.view(-1, input.shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..." ) use_cache = False if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(), DynamicCache()) if encoder_hidden_states is not None else DynamicCache() ) if use_cache and isinstance(past_key_values, tuple): logger.warning_once( "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " "You should pass an instance of `EncoderDecoderCache` instead, e.g. 
" "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`." ) past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if self.config.use_learned_position_embeddings: embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length) else: embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + embed_pos if self.layernorm_embedding is not None: hidden_states = self.layernorm_embedding(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) input_shape = input.shape attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring( custom_intro=""" The TrOCR Model with a language modeling head. Can be used for summarization. 
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    """
)
class TrOCRDecoderWrapper(TrOCRPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.decoder = TrOCRDecoder(config)

    def forward(self, *args, **kwargs):
        return self.decoder(*args, **kwargs)


@auto_docstring(
    custom_intro="""
    The TrOCR Decoder with a language modeling head. Can be used as the decoder part of [`EncoderDecoderModel`] and
    [`VisionEncoderDecoder`].
    """
)
class TrOCRForCausalLM(TrOCRPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["output_projection.weight"]

    def __init__(self, config):
        config.is_decoder = True
        config.is_encoder_decoder = False
        super().__init__(config)
        self.model = TrOCRDecoderWrapper(config)

        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        self.output_projection = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import (
        ...     TrOCRConfig,
        ...     TrOCRProcessor,
        ...     TrOCRForCausalLM,
        ...     ViTConfig,
        ...     ViTModel,
        ...     VisionEncoderDecoderModel,
        ...
) >>> import requests >>> from PIL import Image >>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel >>> # init vision2text model with random weights >>> encoder = ViTModel(ViTConfig()) >>> decoder = TrOCRForCausalLM(TrOCRConfig()) >>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) >>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel` >>> processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten") >>> model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten") >>> # load image from the IAM dataset >>> url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> pixel_values = processor(image, return_tensors="pt").pixel_values >>> text = "industry, ' Mr. Brown commented icily. ' Let us have a" >>> # training >>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id >>> model.config.pad_token_id = processor.tokenizer.pad_token_id >>> model.config.vocab_size = model.config.decoder.vocab_size >>> labels = processor.tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(pixel_values, labels=labels) >>> loss = outputs.loss >>> round(loss.item(), 2) 5.30 >>> # inference >>> generated_ids = model.generate(pixel_values) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> generated_text 'industry, " Mr. Brown commented icily. " Let us have a' ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) logits = self.output_projection(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) __all__ = ["TrOCRForCausalLM", "TrOCRPreTrainedModel"]
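# --- Editor's note (not part of the original file) ---
# Minimal sketch of exercising the standalone decoder defined above, e.g. for a quick shape
# check. It assumes TrOCRConfig accepts these constructor arguments (d_model, decoder_layers,
# decoder_attention_heads, decoder_ffn_dim); for real OCR the decoder is paired with an image
# encoder through VisionEncoderDecoderModel, as in the docstring example above.
#
#     import torch
#     from transformers import TrOCRConfig, TrOCRForCausalLM
#
#     config = TrOCRConfig(vocab_size=100, d_model=64, decoder_layers=2,
#                          decoder_attention_heads=4, decoder_ffn_dim=128)
#     model = TrOCRForCausalLM(config).eval()
#     input_ids = torch.randint(0, config.vocab_size, (1, 5))
#     with torch.no_grad():
#         logits = model(input_ids=input_ids).logits  # shape: (1, 5, config.vocab_size)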
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from transformers import UnivNetConfig, UnivNetModel, logging logging.set_verbosity_info() logger = logging.get_logger("transformers.models.univnet") def get_kernel_predictor_key_mapping(config: UnivNetConfig, old_prefix: str = "", new_prefix: str = ""): mapping = {} # Initial conv layer mapping[f"{old_prefix}.input_conv.0.weight_g"] = f"{new_prefix}.input_conv.weight_g" mapping[f"{old_prefix}.input_conv.0.weight_v"] = f"{new_prefix}.input_conv.weight_v" mapping[f"{old_prefix}.input_conv.0.bias"] = f"{new_prefix}.input_conv.bias" # Kernel predictor resnet blocks for i in range(config.kernel_predictor_num_blocks): mapping[f"{old_prefix}.residual_convs.{i}.1.weight_g"] = f"{new_prefix}.resblocks.{i}.conv1.weight_g" mapping[f"{old_prefix}.residual_convs.{i}.1.weight_v"] = f"{new_prefix}.resblocks.{i}.conv1.weight_v" mapping[f"{old_prefix}.residual_convs.{i}.1.bias"] = f"{new_prefix}.resblocks.{i}.conv1.bias" mapping[f"{old_prefix}.residual_convs.{i}.3.weight_g"] = f"{new_prefix}.resblocks.{i}.conv2.weight_g" mapping[f"{old_prefix}.residual_convs.{i}.3.weight_v"] = f"{new_prefix}.resblocks.{i}.conv2.weight_v" mapping[f"{old_prefix}.residual_convs.{i}.3.bias"] = f"{new_prefix}.resblocks.{i}.conv2.bias" # Kernel output conv mapping[f"{old_prefix}.kernel_conv.weight_g"] = f"{new_prefix}.kernel_conv.weight_g" mapping[f"{old_prefix}.kernel_conv.weight_v"] = f"{new_prefix}.kernel_conv.weight_v" mapping[f"{old_prefix}.kernel_conv.bias"] = f"{new_prefix}.kernel_conv.bias" # Bias output conv mapping[f"{old_prefix}.bias_conv.weight_g"] = f"{new_prefix}.bias_conv.weight_g" mapping[f"{old_prefix}.bias_conv.weight_v"] = f"{new_prefix}.bias_conv.weight_v" mapping[f"{old_prefix}.bias_conv.bias"] = f"{new_prefix}.bias_conv.bias" return mapping def get_key_mapping(config: UnivNetConfig): mapping = {} # NOTE: initial conv layer keys are the same # LVC Residual blocks for i in range(len(config.resblock_stride_sizes)): # LVCBlock initial convt layer mapping[f"res_stack.{i}.convt_pre.1.weight_g"] = f"resblocks.{i}.convt_pre.weight_g" mapping[f"res_stack.{i}.convt_pre.1.weight_v"] = f"resblocks.{i}.convt_pre.weight_v" mapping[f"res_stack.{i}.convt_pre.1.bias"] = f"resblocks.{i}.convt_pre.bias" # Kernel predictor kernel_predictor_mapping = get_kernel_predictor_key_mapping( config, old_prefix=f"res_stack.{i}.kernel_predictor", new_prefix=f"resblocks.{i}.kernel_predictor" ) mapping.update(kernel_predictor_mapping) # LVC Residual blocks for j in range(len(config.resblock_dilation_sizes[i])): mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_g"] = f"resblocks.{i}.resblocks.{j}.conv.weight_g" mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_v"] = f"resblocks.{i}.resblocks.{j}.conv.weight_v" mapping[f"res_stack.{i}.conv_blocks.{j}.1.bias"] = f"resblocks.{i}.resblocks.{j}.conv.bias" # Output conv layer mapping["conv_post.1.weight_g"] = "conv_post.weight_g" mapping["conv_post.1.weight_v"] = 
"conv_post.weight_v" mapping["conv_post.1.bias"] = "conv_post.bias" return mapping def rename_state_dict(state_dict, keys_to_modify, keys_to_remove): model_state_dict = {} for key, value in state_dict.items(): if key in keys_to_remove: continue if key in keys_to_modify: new_key = keys_to_modify[key] model_state_dict[new_key] = value else: model_state_dict[key] = value return model_state_dict def convert_univnet_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, safe_serialization=False, ): model_state_dict_base = torch.load(checkpoint_path, map_location="cpu", weights_only=True) # Get the generator's state dict state_dict = model_state_dict_base["model_g"] if config_path is not None: config = UnivNetConfig.from_pretrained(config_path) else: config = UnivNetConfig() keys_to_modify = get_key_mapping(config) keys_to_remove = set() hf_state_dict = rename_state_dict(state_dict, keys_to_modify, keys_to_remove) model = UnivNetModel(config) # Apply weight norm since the original checkpoint has weight norm applied model.apply_weight_norm() model.load_state_dict(hf_state_dict) # Remove weight norm in preparation for inference model.remove_weight_norm() model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization) if repo_id: print("Pushing to the hub...") model.push_to_hub(repo_id) def main(): parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors`." ) args = parser.parse_args() convert_univnet_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, args.safe_serialization, ) if __name__ == "__main__": main()
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VideoMAE model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class VideoMAEConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the VideoMAE [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. num_frames (`int`, *optional*, defaults to 16): The number of frames in each video. tubelet_size (`int`, *optional*, defaults to 2): The number of tubelets. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. use_mean_pooling (`bool`, *optional*, defaults to `True`): Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token. decoder_num_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the decoder. decoder_hidden_size (`int`, *optional*, defaults to 384): Dimensionality of the decoder. 
decoder_num_hidden_layers (`int`, *optional*, defaults to 4): Number of hidden layers in the decoder. decoder_intermediate_size (`int`, *optional*, defaults to 1536): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. norm_pix_loss (`bool`, *optional*, defaults to `True`): Whether to normalize the target patch pixels. Example: ```python >>> from transformers import VideoMAEConfig, VideoMAEModel >>> # Initializing a VideoMAE videomae-base style configuration >>> configuration = VideoMAEConfig() >>> # Randomly initializing a model from the configuration >>> model = VideoMAEModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "videomae" def __init__( self, image_size=224, patch_size=16, num_channels=3, num_frames=16, tubelet_size=2, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, qkv_bias=True, use_mean_pooling=True, decoder_num_attention_heads=6, decoder_hidden_size=384, decoder_num_hidden_layers=4, decoder_intermediate_size=1536, norm_pix_loss=True, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.num_frames = num_frames self.tubelet_size = tubelet_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.use_mean_pooling = use_mean_pooling self.decoder_num_attention_heads = decoder_num_attention_heads self.decoder_hidden_size = decoder_hidden_size self.decoder_num_hidden_layers = decoder_num_hidden_layers self.decoder_intermediate_size = decoder_intermediate_size self.norm_pix_loss = norm_pix_loss __all__ = ["VideoMAEConfig"]
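# --- Editor's note (not part of the original file) ---
# Hedged sketch of how the spatio-temporal token count follows from this configuration,
# assuming the usual VideoMAE patch embedding (frames grouped into tubelets of `tubelet_size`
# frames, each frame split into `patch_size` x `patch_size` spatial patches):
#
#     from transformers import VideoMAEConfig
#
#     config = VideoMAEConfig()  # image_size=224, patch_size=16, num_frames=16, tubelet_size=2
#     num_patches = (config.image_size // config.patch_size) ** 2 * (
#         config.num_frames // config.tubelet_size
#     )
#     print(num_patches)  # 14 * 14 * 8 = 1568 tokens per video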
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # This file was automatically generated from src/transformers/models/vipllava/modular_vipllava.py. # Do NOT edit this file manually as any edits will be overwritten by the generation of # the file from the modular. If any change should be done, please apply the change to the # modular_vipllava.py file directly. One of our CI enforces this. # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2023 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Union import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...generation import GenerationMixin from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, can_return_tuple from ..auto import AutoModel from .configuration_vipllava import VipLlavaConfig @dataclass @auto_docstring( custom_intro=""" Base class for VipLlava outputs, with hidden states and attentions. """ ) class VipLlavaModelOutputWithPast(BaseModelOutputWithPast): r""" past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ image_hidden_states: Optional[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for VipLlava causal language model (or autoregressive) outputs. """ ) class VipLlavaCausalLMOutputWithPast(ModelOutput): r""" loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[list[torch.FloatTensor]] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None class VipLlavaMultiModalProjector(nn.Module): def __init__(self, config: VipLlavaConfig): super().__init__() num_feature_layers = 1 if isinstance(config.vision_feature_layers, int) else len(config.vision_feature_layers) self.projector_layernorm = nn.LayerNorm( num_feature_layers * config.vision_config.hidden_size, eps=config.projector_layernorm_eps ) self.linear_1 = nn.Linear( num_feature_layers * config.vision_config.hidden_size, config.text_config.hidden_size, bias=True, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True) def forward(self, hidden_states): hidden_states = self.projector_layernorm(hidden_states) hidden_states = self.linear_1(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states @auto_docstring class VipLlavaPreTrainedModel(PreTrainedModel): config: VipLlavaConfig base_model_prefix = "" supports_gradient_checkpointing = True _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_flex_attn = True _supports_attention_backend = True @auto_docstring( custom_intro=""" The VipLlava model which consists of a vision backbone and a language model, without a language modeling head. """ ) class VipLlavaModel(VipLlavaPreTrainedModel): _checkpoint_conversion_mapping = {"language_model.model": "language_model"} def __init__(self, config: VipLlavaConfig): super().__init__(config) self.vision_tower = AutoModel.from_config(config.vision_config) self.multi_modal_projector = VipLlavaMultiModalProjector(config) self.language_model = AutoModel.from_config(config.text_config) self.post_init() def get_input_embeddings(self): return self.language_model.get_input_embeddings() def set_input_embeddings(self, value): self.language_model.set_input_embeddings(value) def set_decoder(self, decoder): self.language_model = decoder def get_decoder(self): return self.language_model def get_image_features( self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, list[int]]] = None ): """ Obtains image last hidden states from the vision tower and apply multimodal projection. Args: pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`) The tensors corresponding to the input images. vision_feature_layers (`Union[int, list[int]]`): The vision feature layer, or the list of indexes of the layers to select the vision feature. Returns: image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`). 
""" vision_feature_layers = ( vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers ) image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) # If multiple feature layers are provided (which is usually the case) # then the image features are concatenated after the CLS is removed. if isinstance(vision_feature_layers, int): image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:] else: # Usually, we select the features from index 1: the layers -2, -5, -8, -11 and 6 image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers] image_features = torch.cat(image_features, dim=-1) image_features = self.multi_modal_projector(image_features) return image_features def get_placeholder_mask( self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor ): """ Obtains multimodal placeholdr mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is equal to the length of multimodal features. If the lengths are different, an error is raised. """ if input_ids is None: special_image_mask = inputs_embeds == self.get_input_embeddings()( torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device) ) special_image_mask = special_image_mask.all(-1) else: special_image_mask = input_ids == self.config.image_token_id n_image_tokens = special_image_mask.sum() special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device) n_image_features = image_features.shape[0] * image_features.shape[1] if inputs_embeds[special_image_mask].numel() != image_features.numel(): raise ValueError( f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" ) return special_image_mask @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layers: Optional[Union[int, list[int]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **lm_kwargs, ) -> Union[tuple, VipLlavaModelOutputWithPast]: r""" vision_feature_layers (`Union[int, list[int]]`, *optional*): The vision feature layer, or the list of indexes of the layers to select the vision feature. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_feature_layers = ( vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers ) if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features( pixel_values=pixel_values, vision_feature_layers=vision_feature_layers ) image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **lm_kwargs, ) output = VipLlavaModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) return output if return_dict else output.to_tuple() @auto_docstring( custom_intro=""" The VIPLLAVA model which consists of a vision backbone and a language model. 
""" ) class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = { "^language_model.model": "model.language_model", "^vision_tower": "model.vision_tower", "^multi_modal_projector": "model.multi_modal_projector", "^language_model.lm_head": "lm_head", } _tied_weights_keys = ["lm_head.weight"] def __init__(self, config: VipLlavaConfig): super().__init__(config) self.model = VipLlavaModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def get_output_embeddings(self) -> nn.Module: return self.lm_head def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def get_image_features( self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, list[int]]] = None ): return self.model.get_image_features(pixel_values=pixel_values, vision_feature_layers=vision_feature_layers) # Make modules available through conditional class for BC @property def language_model(self): return self.model.language_model @property def vision_tower(self): return self.model.vision_tower @property def multi_modal_projector(self): return self.model.multi_modal_projector @can_return_tuple @auto_docstring def forward( self, input_ids: torch.LongTensor = None, pixel_values: torch.FloatTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, vision_feature_layers: Optional[Union[int, list[int]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **lm_kwargs, ) -> Union[tuple, VipLlavaCausalLMOutputWithPast]: r""" vision_feature_layers (`Union[int, list[int]]`, *optional*): The vision feature layer, or the list of indexes of the layers to select the vision feature. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> import torch >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, VipLlavaForConditionalGeneration >>> model = VipLlavaForConditionalGeneration.from_pretrained("llava-hf/vip-llava-7b-hf", device_map="auto", dtype=torch.float16) >>> processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf") >>> prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n{}###Assistant:" >>> question = "Can you please describe this image?" 
        >>> prompt = prompt.format(question)
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_new_tokens=20)
        >>> processor.decode(generate_ids[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
        The image features a brown and white cat sitting on a green surface, with a red ball in its
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            vision_feature_layers=vision_feature_layers,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)

        return VipLlavaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        if cache_position[0] == 0:
            # If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
            # Otherwise we need pixel values to be passed to model
            model_inputs["pixel_values"] = pixel_values

        return model_inputs


__all__ = ["VipLlavaModel", "VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel"]
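# --- Editor's note (not part of the original file) ---
# Hedged sketch of the projector sizing implemented in VipLlavaMultiModalProjector above:
# when vision_feature_layers is a list, the selected hidden states are concatenated on the
# last dimension, so the layernorm/linear_1 input width is len(layers) * vision hidden size.
#
#     from transformers import VipLlavaConfig
#
#     config = VipLlavaConfig()
#     layers = config.vision_feature_layers
#     num_feature_layers = 1 if isinstance(layers, int) else len(layers)
#     projector_in = num_feature_layers * config.vision_config.hidden_size
#     # linear_1 then projects projector_in -> config.text_config.hidden_size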
# coding=utf-8 # Copyright 2021 The UCLA NLP Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch VisualBERT model.""" import math from dataclasses import dataclass from typing import Optional, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss, KLDivLoss, LogSoftmax from ...activations import ACT2FN from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MultipleChoiceModelOutput, SequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, auto_docstring, logging from .configuration_visual_bert import VisualBertConfig logger = logging.get_logger(__name__) class VisualBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings and visual embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) # For Visual Features # Token type and position embedding for image features self.visual_token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.visual_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) if config.special_visual_initialize: self.visual_token_type_embeddings.weight.data = nn.Parameter( self.token_type_embeddings.weight.data.clone(), requires_grad=True ) self.visual_position_embeddings.weight.data = nn.Parameter( self.position_embeddings.weight.data.clone(), requires_grad=True ) self.visual_projection = nn.Linear(config.visual_embedding_dim, config.hidden_size) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, visual_embeds=None, visual_token_type_ids=None, image_text_alignment=None, ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, 
device=self.position_ids.device) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings # Absolute Position Embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings if visual_embeds is not None: if visual_token_type_ids is None: visual_token_type_ids = torch.ones( visual_embeds.size()[:-1], dtype=torch.long, device=self.position_ids.device ) visual_embeds = self.visual_projection(visual_embeds) visual_token_type_embeddings = self.visual_token_type_embeddings(visual_token_type_ids) if image_text_alignment is not None: # image_text_alignment = Batch x image_length x alignment_number. # Each element denotes the position of the word corresponding to the image feature. -1 is the padding value. dtype = token_type_embeddings.dtype image_text_alignment_mask = (image_text_alignment != -1).long() # Get rid of the -1. image_text_alignment = image_text_alignment_mask * image_text_alignment # Batch x image_length x alignment length x dim visual_position_embeddings = self.position_embeddings(image_text_alignment) visual_position_embeddings *= image_text_alignment_mask.to(dtype=dtype).unsqueeze(-1) visual_position_embeddings = visual_position_embeddings.sum(2) # We want to average along the alignment_number dimension. image_text_alignment_mask = image_text_alignment_mask.to(dtype=dtype).sum(2) if (image_text_alignment_mask == 0).sum() != 0: image_text_alignment_mask[image_text_alignment_mask == 0] = 1 # Avoid divide by zero error logger.warning( "Found 0 values in `image_text_alignment_mask`. Setting them to 1 to avoid divide-by-zero" " error." ) visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(-1) visual_position_ids = torch.zeros( *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device ) # When fine-tuning the detector , the image_text_alignment is sometimes padded too long. 
if visual_position_embeddings.size(1) != visual_embeds.size(1): if visual_position_embeddings.size(1) < visual_embeds.size(1): raise ValueError( f"Visual position embeddings length: {visual_position_embeddings.size(1)} " f"should be the same as `visual_embeds` length: {visual_embeds.size(1)}" ) visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :] visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings( visual_position_ids ) else: visual_position_ids = torch.zeros( *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device ) visual_position_embeddings = self.visual_position_embeddings(visual_position_ids) visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings embeddings = torch.cat((embeddings, visual_embeddings), dim=1) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class VisualBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): batch_size, seq_length, _ = hidden_states.shape query_layer = ( self.query(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) key_layer = ( self.key(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) value_layer = ( self.value(hidden_states) .view(batch_size, -1, self.num_attention_heads, self.attention_head_size) .transpose(1, 2) ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in VisualBertSelfAttentionModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
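        # Shape reference for the attention computed above:
        #   hidden_states:    (batch_size, seq_length, hidden_size)
        #   query/key/value:  (batch_size, num_attention_heads, seq_length, attention_head_size)
        #   attention_scores: (batch_size, num_attention_heads, seq_length, seq_length),
        #                     scaled by 1 / sqrt(attention_head_size)
        # The attention_mask is additive: 0 for positions to keep and a large negative value for
        # masked positions, so those positions vanish after the softmax above.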
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->VisualBert class VisualBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class VisualBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = VisualBertSelfAttention(config) self.output = VisualBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->VisualBert class VisualBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert class VisualBertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = 
self.LayerNorm(hidden_states + input_tensor) return hidden_states class VisualBertLayer(GradientCheckpointingLayer): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = VisualBertAttention(config) self.intermediate = VisualBertIntermediate(config) self.output = VisualBertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, ): self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class VisualBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([VisualBertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->VisualBert class VisualBertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
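        # Note: the "first token" is the [CLS] position; its hidden state is projected through the
        # dense layer and a tanh below to form the pooled, sequence-level representation consumed
        # by the classification heads.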
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->VisualBert class VisualBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert class VisualBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = VisualBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->VisualBert class VisualBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = VisualBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score @auto_docstring class VisualBertPreTrainedModel(PreTrainedModel): config: VisualBertConfig base_model_prefix = "visual_bert" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if hasattr(module, "bias") and module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, VisualBertLMPredictionHead): module.bias.data.zero_() @dataclass @auto_docstring( custom_intro=""" Output type of [`VisualBertForPreTraining`]. """ ) class VisualBertForPreTrainingOutput(ModelOutput): r""" loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the sentence-image prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). 
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the sentence-image prediction (classification) head (scores of True/False continuation before SoftMax). """ loss: Optional[torch.FloatTensor] = None prediction_logits: Optional[torch.FloatTensor] = None seq_relationship_logits: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None @auto_docstring( custom_intro=""" The model can behave as an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ ) class VisualBertModel(VisualBertPreTrainedModel): def __init__(self, config, add_pooling_layer=True): r""" add_pooling_layer (bool, *optional*, defaults to `True`): Whether to add a pooling layer """ super().__init__(config) self.config = config self.embeddings = VisualBertEmbeddings(config) self.encoder = VisualBertEncoder(config) self.pooler = VisualBertPooler(config) if add_pooling_layer else None self.bypass_transformer = config.bypass_transformer if self.bypass_transformer: self.additional_layer = VisualBertLayer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. 
image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image. from transformers import AutoTokenizer, VisualBertModel import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre") inputs = tokenizer("The capital of France is Paris.", return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update( { "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if visual_embeds is not None: visual_input_shape = visual_embeds.size()[:-1] if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if visual_embeds is not None and visual_attention_mask is None: visual_attention_mask = torch.ones(visual_input_shape, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
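        # Mask bookkeeping below (sketch), for a text length T and V visual tokens:
        #   attention_mask:          (batch_size, T)   - 1 for real text tokens, 0 for padding
        #   visual_attention_mask:   (batch_size, V)   - 1 for real visual tokens
        #   combined_attention_mask: (batch_size, T+V) after concatenation
        # get_extended_attention_mask then broadcasts this to (batch_size, 1, 1, T+V) and turns the
        # 1/0 entries into additive 0 / large-negative biases for the self-attention softmax.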
if visual_embeds is not None: combined_attention_mask = torch.cat((attention_mask, visual_attention_mask), dim=-1) extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( combined_attention_mask, (batch_size, input_shape + visual_input_shape) ) else: extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( attention_mask, (batch_size, input_shape) ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, ) if self.bypass_transformer and visual_embeds is not None: text_length = input_ids.size(1) text_embedding_output = embedding_output[:, :text_length, :] visual_embedding_output = embedding_output[:, text_length:, :] text_extended_attention_mask = extended_attention_mask[:, :, text_length, :text_length] encoded_outputs = self.encoder( text_embedding_output, attention_mask=text_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoded_outputs[0] concatenated_input = torch.cat((sequence_output, visual_embedding_output), dim=1) sequence_output = self.additional_layer(concatenated_input, extended_attention_mask) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None else: encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" VisualBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `sentence-image prediction (classification)` head. 
""" ) class VisualBertForPreTraining(VisualBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.visual_bert = VisualBertModel(config) self.cls = VisualBertPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, sentence_image_labels: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], VisualBertForPreTrainingOutput]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` sentence_image_labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a matching pair of sequence A for the given image, - 1 indicates sequence B is a random sequence w.r.t A for the given image. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. 
from transformers import AutoTokenizer, VisualBertForPreTraining tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre") inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update( { "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) max_length = inputs["input_ids"].shape[-1] + visual_embeds.shape[-2] labels = tokenizer( "The capital of France is Paris.", return_tensors="pt", padding="max_length", max_length=max_length )["input_ids"] sentence_image_labels = torch.tensor(1).unsqueeze(0) # Batch_size outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels) loss = outputs.loss prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: total_size = attention_mask.size(-1) + visual_attention_mask.size(-1) if labels.size(-1) != total_size: raise ValueError( "The labels provided should have same sequence length as total attention mask. " f"Found labels with sequence length {labels.size(-1)}, expected {total_size}." ) outputs = self.visual_bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and sentence_image_labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) sentence_image_loss = loss_fct(seq_relationship_score.view(-1, 2), sentence_image_labels.view(-1)) total_loss = masked_lm_loss + sentence_image_loss elif labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return VisualBertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring class VisualBertForMultipleChoice(VisualBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.visual_bert = VisualBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = 
None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. 
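        # Note: as in the other examples, `get_visual_embeddings` stands in for a detector-based helper.
        # For multiple choice the same region features are repeated per choice, giving
        # (batch_size, num_choices, visual_seq_length, visual_embedding_dim) after the expand below.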
from transformers import AutoTokenizer, VisualBertForMultipleChoice import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." choice0 = "It is eaten with a fork and a knife." choice1 = "It is eaten while held in the hand." visual_embeds = get_visual_embeddings(image) # (batch_size, num_choices, visual_seq_length, visual_embedding_dim) visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors="pt", padding=True) # batch size is 1 inputs_dict = {k: v.unsqueeze(0) for k, v in encoding.items()} inputs_dict.update( { "visual_embeds": visual_embeds, "visual_attention_mask": visual_attention_mask, "visual_token_type_ids": visual_token_type_ids, "labels": labels, } ) outputs = model(**inputs_dict) loss = outputs.loss logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) visual_embeds = ( visual_embeds.view(-1, visual_embeds.size(-2), visual_embeds.size(-1)) if visual_embeds is not None else None ) visual_attention_mask = ( visual_attention_mask.view(-1, visual_attention_mask.size(-1)) if visual_attention_mask is not None else None ) visual_token_type_ids = ( visual_token_type_ids.view(-1, visual_token_type_ids.size(-1)) if visual_token_type_ids is not None else None ) outputs = self.visual_bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) _, pooled_output = outputs[0], outputs[1] pooled_output = self.dropout(pooled_output) logits = self.cls(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled output) for VQA. 
""" ) class VisualBertForQuestionAnswering(VisualBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.visual_bert = VisualBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import AutoTokenizer, VisualBertForQuestionAnswering import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa") text = "Who is eating the apple?" 
inputs = tokenizer(text, return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update( { "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) labels = torch.tensor([[0.0, 1.0]]).unsqueeze(0) # Batch size 1, Num labels 2 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Get the index of the last text token index_to_gather = attention_mask.sum(1) - 2 # as in original code outputs = self.visual_bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # TO-CHECK: From the original code index_to_gather = ( index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1)) ) pooled_output = torch.gather(sequence_output, 1, index_to_gather) pooled_output = self.dropout(pooled_output) logits = self.cls(pooled_output) reshaped_logits = logits.view(-1, self.num_labels) loss = None if labels is not None: loss_fct = nn.KLDivLoss(reduction="batchmean") log_softmax = nn.LogSoftmax(dim=-1) reshaped_logits = log_softmax(reshaped_logits) loss = loss_fct(reshaped_logits, labels.contiguous()) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring( custom_intro=""" VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled output) for Visual Reasoning e.g. for NLVR task. 
""" ) class VisualBertForVisualReasoning(VisualBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.visual_bert = VisualBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2 # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. A classification loss is computed (Cross-Entropy) against these labels. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import AutoTokenizer, VisualBertForVisualReasoning import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") text = "Who is eating the apple?" 
inputs = tokenizer(text, return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) inputs.update( { "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) labels = torch.tensor(1).unsqueeze(0) # Batch size 1, Num choices 2 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.visual_bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # sequence_output = outputs[0] pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.cls(pooled_output) reshaped_logits = logits.contiguous() loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class VisualBertRegionToPhraseAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = 1 # config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, query, key, attention_mask): batch_size, seq_length, _ = query.shape attention_mask = attention_mask.to(query.dtype) attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) attention_mask = (1.0 - attention_mask) * torch.finfo(query.dtype).min query_layer = ( self.query(query).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) ) key_layer = ( self.key(key).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) ) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) attention_scores = attention_scores + attention_mask attention_scores = attention_scores.squeeze(1) return attention_scores @auto_docstring( custom_intro=""" VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment e.g. for Flickr30 Entities task. 
""" ) class VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.visual_bert = VisualBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.cls = VisualBertPreTrainingHeads(config) self.attention = VisualBertRegionToPhraseAttention(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, visual_embeds: Optional[torch.FloatTensor] = None, visual_attention_mask: Optional[torch.LongTensor] = None, visual_token_type_ids: Optional[torch.LongTensor] = None, image_text_alignment: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, region_to_phrase_position: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]: r""" visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*): The embedded representation of the visual inputs, generally derived using using an object detector. visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*): Segment token indices to indicate different portions of the visual embeds. [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the *visual_token_type_ids* to *1* for all tokens. image_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*): Image-Text alignment uses to decide the position IDs of the visual embeddings. region_to_phrase_position (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*): The positions depicting the position of the image embedding corresponding to the textual tokens. labels (`torch.LongTensor` of shape `(batch_size, total_sequence_length, visual_sequence_length)`, *optional*): Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and the outputs from the attention layer. Example: ```python # Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch. from transformers import AutoTokenizer, VisualBertForRegionToPhraseAlignment import torch tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") model = VisualBertForRegionToPhraseAlignment.from_pretrained("uclanlp/visualbert-vqa-coco-pre") text = "Who is eating the apple?" 
inputs = tokenizer(text, return_tensors="pt") visual_embeds = get_visual_embeddings(image).unsqueeze(0) visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float) region_to_phrase_position = torch.ones((1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2])) inputs.update( { "region_to_phrase_position": region_to_phrase_position, "visual_embeds": visual_embeds, "visual_token_type_ids": visual_token_type_ids, "visual_attention_mask": visual_attention_mask, } ) labels = torch.ones( (1, inputs["input_ids"].shape[-1] + visual_embeds.shape[-2], visual_embeds.shape[-2]) ) # Batch size 1 outputs = model(**inputs, labels=labels) loss = outputs.loss scores = outputs.logits ```""" if region_to_phrase_position is None: raise ValueError("`region_to_phrase_position` should not be None when using Flickr Model.") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.visual_bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] region_to_phrase_position_mask = (region_to_phrase_position != -1).long() # Make the -1 become 0 region_to_phrase_position = region_to_phrase_position * region_to_phrase_position_mask # Selected_positions = batch x selected position x dim expanded_region_to_phrase_positions = region_to_phrase_position.unsqueeze(2).expand( region_to_phrase_position.size(0), region_to_phrase_position.size(1), sequence_output.size(2) ) selected_positions = sequence_output.gather(1, expanded_region_to_phrase_positions) # Visual Features = batch x visual_feature_length x dim # This will need separate image and visual masks. visual_features = sequence_output[:, attention_mask.size(1) :] if visual_features.size(1) != visual_attention_mask.size(1): raise ValueError( f"Visual features length :{visual_features.size(1)} should be the same" f" as visual attention mask length: {visual_attention_mask.size(1)}." ) logits = self.attention(selected_positions, visual_features, visual_attention_mask) loss = None if labels is not None: # scores = batch x selected position x visual_feature # scores = selected_positions.bmm(visual_features.transpose(1,2)) # label = batch x selected_postion x needed position loss_fct = KLDivLoss(reduction="batchmean") log_softmax = LogSoftmax(dim=-1) scores = log_softmax(logits) labels = labels.contiguous() loss = loss_fct(scores, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "VisualBertForMultipleChoice", "VisualBertForPreTraining", "VisualBertForQuestionAnswering", "VisualBertForRegionToPhraseAlignment", "VisualBertForVisualReasoning", "VisualBertLayer", "VisualBertModel", "VisualBertPreTrainedModel", ]
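# -----------------------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes a checkpoint compatible with
# `uclanlp/visualbert-vqa-coco-pre` and substitutes random tensors for the detector-derived region
# features that a `get_visual_embeddings`-style helper would normally provide; the number of
# regions (36) is arbitrary.
# -----------------------------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
    model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")

    inputs = tokenizer("A person is riding a horse.", return_tensors="pt")
    # Stand-in for object-detector features: (batch_size, num_regions, visual_embedding_dim).
    visual_embeds = torch.randn(1, 36, model.config.visual_embedding_dim)
    inputs.update(
        {
            "visual_embeds": visual_embeds,
            "visual_token_type_ids": torch.ones(visual_embeds.shape[:-1], dtype=torch.long),
            "visual_attention_mask": torch.ones(visual_embeds.shape[:-1], dtype=torch.float),
        }
    )

    outputs = model(**inputs)
    # The last hidden state covers the text tokens followed by the visual tokens.
    print(outputs.last_hidden_state.shape)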
transformers/src/transformers/models/visual_bert/modeling_visual_bert.py/0
{ "file_path": "transformers/src/transformers/models/visual_bert/modeling_visual_bert.py", "repo_id": "transformers", "token_count": 29619 }
547
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for VitPose.""" import itertools import math from typing import TYPE_CHECKING, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import to_channel_dimension_format from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_scipy_available, is_torch_available, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL if is_scipy_available(): from scipy.linalg import inv from scipy.ndimage import affine_transform, gaussian_filter if TYPE_CHECKING: from .modeling_vitpose import VitPoseEstimatorOutput logger = logging.get_logger(__name__) # inspired by https://github.com/ViTAE-Transformer/ViTPose/blob/d5216452796c90c6bc29f5c5ec0bdba94366768a/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py#L132 def box_to_center_and_scale( box: Union[tuple, list, np.ndarray], image_width: int, image_height: int, normalize_factor: float = 200.0, padding_factor: float = 1.25, ): """ Encodes a bounding box in COCO format into (center, scale). Args: box (`Tuple`, `List`, or `np.ndarray`): Bounding box in COCO format (top_left_x, top_left_y, width, height). image_width (`int`): Image width. image_height (`int`): Image height. normalize_factor (`float`): Width and height scale factor. padding_factor (`float`): Bounding box padding factor. Returns: tuple: A tuple containing center and scale. - `np.ndarray` [float32](2,): Center of the bbox (x, y). - `np.ndarray` [float32](2,): Scale of the bbox width & height. """ top_left_x, top_left_y, width, height = box[:4] aspect_ratio = image_width / image_height center = np.array([top_left_x + width * 0.5, top_left_y + height * 0.5], dtype=np.float32) if width > aspect_ratio * height: height = width * 1.0 / aspect_ratio elif width < aspect_ratio * height: width = height * aspect_ratio scale = np.array([width / normalize_factor, height / normalize_factor], dtype=np.float32) scale = scale * padding_factor return center, scale def coco_to_pascal_voc(bboxes: np.ndarray) -> np.ndarray: """ Converts bounding boxes from the COCO format to the Pascal VOC format. In other words, converts from (top_left_x, top_left_y, width, height) format to (top_left_x, top_left_y, bottom_right_x, bottom_right_y). Args: bboxes (`np.ndarray` of shape `(batch_size, 4)): Bounding boxes in COCO format. Returns: `np.ndarray` of shape `(batch_size, 4) in Pascal VOC format. """ bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0] - 1 bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1] - 1 return bboxes def get_keypoint_predictions(heatmaps: np.ndarray) -> tuple[np.ndarray, np.ndarray]: """Get keypoint predictions from score maps. 
    Args:
        heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
            Model predicted heatmaps.

    Returns:
        tuple: A tuple containing aggregated results.

        - coords (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
            Predicted keypoint locations.
        - scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
            Scores (confidence) of the keypoints.
    """
    if not isinstance(heatmaps, np.ndarray):
        raise TypeError("Heatmaps should be np.ndarray")
    if heatmaps.ndim != 4:
        raise ValueError("Heatmaps should be 4-dimensional")

    batch_size, num_keypoints, _, width = heatmaps.shape
    heatmaps_reshaped = heatmaps.reshape((batch_size, num_keypoints, -1))
    idx = np.argmax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
    scores = np.amax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))

    preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = preds[:, :, 0] % width
    preds[:, :, 1] = preds[:, :, 1] // width
    preds = np.where(np.tile(scores, (1, 1, 2)) > 0.0, preds, -1)
    return preds, scores


def post_dark_unbiased_data_processing(coords: np.ndarray, batch_heatmaps: np.ndarray, kernel: int = 3) -> np.ndarray:
    """DARK post-processing. Implemented by unbiased_data_processing.

    Paper references:
    - Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
    - Zhang et al. Distribution-Aware Coordinate Representation for Human Pose Estimation (CVPR 2020).

    Args:
        coords (`np.ndarray` of shape `(num_persons, num_keypoints, 2)`):
            Initial coordinates of human pose.
        batch_heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
            Batched heatmaps as predicted by the model. A batch_size of 1 is used for the bottom-up paradigm where all
            persons share the same heatmap. A batch_size of `num_persons` is used for the top-down paradigm where each
            person has its own heatmaps.
        kernel (`int`, *optional*, defaults to 3):
            Gaussian kernel size (K) for modulation.

    Returns:
        `np.ndarray` of shape `(num_persons, num_keypoints, 2)`:
            Refined coordinates.
    """
    batch_size, num_keypoints, height, width = batch_heatmaps.shape
    num_coords = coords.shape[0]
    if not (batch_size == 1 or batch_size == num_coords):
        raise ValueError("The batch size of heatmaps should be 1 or equal to the batch size of coordinates.")
    radius = int((kernel - 1) // 2)
    batch_heatmaps = np.array(
        [
            [gaussian_filter(heatmap, sigma=0.8, radius=(radius, radius), axes=(0, 1)) for heatmap in heatmaps]
            for heatmaps in batch_heatmaps
        ]
    )
    batch_heatmaps = np.clip(batch_heatmaps, 0.001, 50)
    batch_heatmaps = np.log(batch_heatmaps)

    batch_heatmaps_pad = np.pad(batch_heatmaps, ((0, 0), (0, 0), (1, 1), (1, 1)), mode="edge").flatten()

    # calculate indices for coordinates
    index = coords[..., 0] + 1 + (coords[..., 1] + 1) * (width + 2)
    index += (width + 2) * (height + 2) * np.arange(0, batch_size * num_keypoints).reshape(-1, num_keypoints)
    index = index.astype(int).reshape(-1, 1)
    i_ = batch_heatmaps_pad[index]
    ix1 = batch_heatmaps_pad[index + 1]
    iy1 = batch_heatmaps_pad[index + width + 2]
    ix1y1 = batch_heatmaps_pad[index + width + 3]
    ix1_y1_ = batch_heatmaps_pad[index - width - 3]
    ix1_ = batch_heatmaps_pad[index - 1]
    iy1_ = batch_heatmaps_pad[index - 2 - width]

    # calculate refined coordinates using Newton's method
    dx = 0.5 * (ix1 - ix1_)
    dy = 0.5 * (iy1 - iy1_)
    derivative = np.concatenate([dx, dy], axis=1)
    derivative = derivative.reshape(num_coords, num_keypoints, 2, 1)

    dxx = ix1 - 2 * i_ + ix1_
    dyy = iy1 - 2 * i_ + iy1_
    dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_)
    hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1)
    hessian = hessian.reshape(num_coords, num_keypoints, 2, 2)
    hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2))
    coords -= np.einsum("ijmn,ijnk->ijmk", hessian, derivative).squeeze()
    return coords


def transform_preds(coords: np.ndarray, center: np.ndarray, scale: np.ndarray, output_size: np.ndarray) -> np.ndarray:
    """Get final keypoint predictions from heatmaps and apply scaling and translation to map them back to the image.

    Note:
        num_keypoints: K

    Args:
        coords (`np.ndarray` of shape `(num_keypoints, ndims)`):
            * If ndims=2, coords are predicted keypoint locations.
            * If ndims=4, coords are composed of (x, y, scores, tags).
            * If ndims=5, coords are composed of (x, y, scores, tags, flipped_tags).
        center (`np.ndarray` of shape `(2,)`):
            Center of the bounding box (x, y).
        scale (`np.ndarray` of shape `(2,)`):
            Scale of the bounding box with respect to the original image width and height.
        output_size (`np.ndarray` of shape `(2,)`):
            Size of the destination heatmaps in (height, width) format.

    Returns:
        np.ndarray: Predicted coordinates in the images.
    """
    if coords.shape[1] not in (2, 4, 5):
        raise ValueError("Coordinates need to have either 2, 4 or 5 dimensions.")
    if len(center) != 2:
        raise ValueError("Center needs to have 2 elements, one for x and one for y.")
    if len(scale) != 2:
        raise ValueError("Scale needs to consist of a width and height")
    if len(output_size) != 2:
        raise ValueError("Output size needs to consist of a height and width")

    # Recover the scale which is normalized by a factor of 200.
scale = scale * 200.0 # We use unbiased data processing scale_y = scale[1] / (output_size[0] - 1.0) scale_x = scale[0] / (output_size[1] - 1.0) target_coords = np.ones_like(coords) target_coords[:, 0] = coords[:, 0] * scale_x + center[0] - scale[0] * 0.5 target_coords[:, 1] = coords[:, 1] * scale_y + center[1] - scale[1] * 0.5 return target_coords def get_warp_matrix(theta: float, size_input: np.ndarray, size_dst: np.ndarray, size_target: np.ndarray): """ Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Source: https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py Args: theta (`float`): Rotation angle in degrees. size_input (`np.ndarray`): Size of input image [width, height]. size_dst (`np.ndarray`): Size of output image [width, height]. size_target (`np.ndarray`): Size of ROI in input plane [w, h]. Returns: `np.ndarray`: A matrix for transformation. """ theta = np.deg2rad(theta) matrix = np.zeros((2, 3), dtype=np.float32) scale_x = size_dst[0] / size_target[0] scale_y = size_dst[1] / size_target[1] matrix[0, 0] = math.cos(theta) * scale_x matrix[0, 1] = -math.sin(theta) * scale_x matrix[0, 2] = scale_x * ( -0.5 * size_input[0] * math.cos(theta) + 0.5 * size_input[1] * math.sin(theta) + 0.5 * size_target[0] ) matrix[1, 0] = math.sin(theta) * scale_y matrix[1, 1] = math.cos(theta) * scale_y matrix[1, 2] = scale_y * ( -0.5 * size_input[0] * math.sin(theta) - 0.5 * size_input[1] * math.cos(theta) + 0.5 * size_target[1] ) return matrix def scipy_warp_affine(src, M, size): """ This function implements cv2.warpAffine function using affine_transform in scipy. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html and https://docs.opencv.org/4.x/d4/d61/tutorial_warp_affine.html for more details. Note: the original implementation of cv2.warpAffine uses cv2.INTER_LINEAR. """ channels = [src[..., i] for i in range(src.shape[-1])] # Convert to a 3x3 matrix used by SciPy M_scipy = np.vstack([M, [0, 0, 1]]) # If you have a matrix for the ‘push’ transformation, use its inverse (numpy.linalg.inv) in this function. M_inv = inv(M_scipy) M_inv[0, 0], M_inv[0, 1], M_inv[1, 0], M_inv[1, 1], M_inv[0, 2], M_inv[1, 2] = ( M_inv[1, 1], M_inv[1, 0], M_inv[0, 1], M_inv[0, 0], M_inv[1, 2], M_inv[0, 2], ) new_src = [affine_transform(channel, M_inv, output_shape=size, order=1) for channel in channels] new_src = np.stack(new_src, axis=-1) return new_src class VitPoseImageProcessor(BaseImageProcessor): r""" Constructs a VitPose image processor. Args: do_affine_transform (`bool`, *optional*, defaults to `True`): Whether to apply an affine transformation to the input images. size (`dict[str, int]` *optional*, defaults to `{"height": 256, "width": 192}`): Resolution of the image after `affine_transform` is applied. Only has an effect if `do_affine_transform` is set to `True`. Can be overridden by `size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. 
image_mean (`list[int]`, defaults to `[0.485, 0.456, 0.406]`, *optional*): The sequence of means for each channel, to be used when normalizing images. image_std (`list[int]`, defaults to `[0.229, 0.224, 0.225]`, *optional*): The sequence of standard deviations for each channel, to be used when normalizing images. """ model_input_names = ["pixel_values"] def __init__( self, do_affine_transform: bool = True, size: Optional[dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, **kwargs, ): super().__init__(**kwargs) self.do_affine_transform = do_affine_transform self.size = size if size is not None else {"height": 256, "width": 192} self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.normalize_factor = 200.0 def affine_transform( self, image: np.array, center: tuple[float], scale: tuple[float], rotation: float, size: dict[str, int], data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.array: """ Apply an affine transformation to an image. Args: image (`np.array`): Image to transform. center (`tuple[float]`): Center of the bounding box (x, y). scale (`tuple[float]`): Scale of the bounding box with respect to height/width. rotation (`float`): Rotation angle in degrees. size (`dict[str, int]`): Size of the destination image. data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format of the output image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. """ data_format = input_data_format if data_format is None else data_format size = (size["width"], size["height"]) # one uses a pixel standard deviation of 200 pixels transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0) # input image requires channels last format image = ( image if input_data_format == ChannelDimension.LAST else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) ) image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0])) image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST) return image def preprocess( self, images: ImageInput, boxes: Union[list[list[float]], np.ndarray], do_affine_transform: Optional[bool] = None, size: Optional[dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, list[float]]] = None, image_std: Optional[Union[float, list[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. boxes (`list[list[list[float]]]` or `np.ndarray`): List or array of bounding boxes for each image. 
Each box should be a list of 4 floats representing the bounding box coordinates in COCO format (top_left_x, top_left_y, width, height). do_affine_transform (`bool`, *optional*, defaults to `self.do_affine_transform`): Whether to apply an affine transformation to the input images. size (`dict[str, int]` *optional*, defaults to `self.size`): Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after resizing. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean to use if `do_normalize` is set to `True`. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use if `do_normalize` is set to `True`. return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, width). """ do_affine_transform = do_affine_transform if do_affine_transform is not None else self.do_affine_transform size = size if size is not None else self.size do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if isinstance(boxes, list) and len(images) != len(boxes): raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {len(boxes)}") elif isinstance(boxes, np.ndarray) and len(images) != boxes.shape[0]: raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {boxes.shape[0]}") # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if is_scaled_image(images[0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." ) if input_data_format is None: # We assume that all images have the same channel dimension format. 
input_data_format = infer_channel_dimension_format(images[0]) # transformations (affine transformation + rescaling + normalization) if self.do_affine_transform: new_images = [] for image, image_boxes in zip(images, boxes): for box in image_boxes: center, scale = box_to_center_and_scale( box, image_width=size["width"], image_height=size["height"], normalize_factor=self.normalize_factor, ) transformed_image = self.affine_transform( image, center, scale, rotation=0, size=size, input_data_format=input_data_format ) new_images.append(transformed_image) images = new_images # For batch processing, the number of boxes must be consistent across all images in the batch. # When using a list input, the number of boxes can vary dynamically per image. # The image processor creates pixel_values of shape (batch_size*num_persons, num_channels, height, width) all_images = [] for image in images: if do_rescale: image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) if do_normalize: image = self.normalize( image=image, mean=image_mean, std=image_std, input_data_format=input_data_format ) all_images.append(image) images = [ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images ] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs def keypoints_from_heatmaps( self, heatmaps: np.ndarray, center: np.ndarray, scale: np.ndarray, kernel: int = 11, ): """ Get final keypoint predictions from heatmaps and transform them back to the image. Args: heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width])`): Model predicted heatmaps. center (`np.ndarray` of shape `(batch_size, 2)`): Center of the bounding box (x, y). scale (`np.ndarray` of shape `(batch_size, 2)`): Scale of the bounding box wrt original images of width and height. kernel (int, *optional*, defaults to 11): Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training. K=17 for sigma=3 and k=11 for sigma=2. Returns: tuple: A tuple containing keypoint predictions and scores. - preds (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`): Predicted keypoint location in images. - scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`): Scores (confidence) of the keypoints. """ batch_size, _, height, width = heatmaps.shape coords, scores = get_keypoint_predictions(heatmaps) preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel) # Transform back to the image for i in range(batch_size): preds[i] = transform_preds(preds[i], center=center[i], scale=scale[i], output_size=[height, width]) return preds, scores def post_process_pose_estimation( self, outputs: "VitPoseEstimatorOutput", boxes: Union[list[list[list[float]]], np.ndarray], kernel_size: int = 11, threshold: Optional[float] = None, target_sizes: Union[TensorType, list[tuple]] = None, ): """ Transform the heatmaps into keypoint predictions and transform them back to the image. Args: outputs (`VitPoseEstimatorOutput`): VitPoseForPoseEstimation model outputs. boxes (`list[list[list[float]]]` or `np.ndarray`): List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding box coordinates in COCO format (top_left_x, top_left_y, width, height). kernel_size (`int`, *optional*, defaults to 11): Gaussian kernel size (K) for modulation. 
threshold (`float`, *optional*, defaults to None): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will be resize with the default value. Returns: `list[list[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image in the batch as predicted by the model. """ # First compute centers and scales for each bounding box batch_size, num_keypoints, _, _ = outputs.heatmaps.shape if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) centers = np.zeros((batch_size, 2), dtype=np.float32) scales = np.zeros((batch_size, 2), dtype=np.float32) flattened_boxes = list(itertools.chain(*boxes)) for i in range(batch_size): if target_sizes is not None: image_width, image_height = target_sizes[i][0], target_sizes[i][1] scale_factor = np.array([image_width, image_height, image_width, image_height]) flattened_boxes[i] = flattened_boxes[i] * scale_factor width, height = self.size["width"], self.size["height"] center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height) centers[i, :] = center scales[i, :] = scale preds, scores = self.keypoints_from_heatmaps( outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size ) all_boxes = np.zeros((batch_size, 4), dtype=np.float32) all_boxes[:, 0:2] = centers[:, 0:2] all_boxes[:, 2:4] = scales[:, 0:2] poses = torch.tensor(preds) scores = torch.tensor(scores) labels = torch.arange(0, num_keypoints) bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes)) results: list[list[dict[str, torch.Tensor]]] = [] pose_bbox_pairs = zip(poses, scores, bboxes_xyxy) for image_bboxes in boxes: image_results: list[dict[str, torch.Tensor]] = [] for _ in image_bboxes: # Unpack the next pose and bbox_xyxy from the iterator pose, score, bbox_xyxy = next(pose_bbox_pairs) score = score.squeeze() keypoints_labels = labels if threshold is not None: keep = score > threshold pose = pose[keep] score = score[keep] keypoints_labels = keypoints_labels[keep] pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy} image_results.append(pose_result) results.append(image_results) return results __all__ = ["VitPoseImageProcessor"]
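The processor above is meant to be paired with `VitPoseForPoseEstimation`: `preprocess` cuts out one crop per COCO-format box and warps it to `size`, and `post_process_pose_estimation` decodes the predicted heatmaps back into coordinates of the original image. Below is a minimal sketch of that flow using only the defaults defined in this file; the dummy image and the box values are illustrative assumptions, not taken from the source.

```python
import numpy as np

from transformers import VitPoseImageProcessor

# Instantiate with the defaults defined above (256x192 crops, ImageNet mean/std).
processor = VitPoseImageProcessor()

# A dummy RGB image and one COCO-format (x, y, w, h) person box per image;
# the box values here are made up for illustration.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
boxes = [[[100.0, 50.0, 200.0, 400.0]]]

inputs = processor(images=image, boxes=boxes, return_tensors="pt")
print(inputs["pixel_values"].shape)  # (num_persons, 3, 256, 192): one crop per box

# The heatmaps returned by VitPoseForPoseEstimation would then be decoded with
# processor.post_process_pose_estimation(outputs, boxes=boxes), which maps the
# keypoints back into the original image frame.
```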
transformers/src/transformers/models/vitpose/image_processing_vitpose.py/0
{ "file_path": "transformers/src/transformers/models/vitpose/image_processing_vitpose.py", "repo_id": "transformers", "token_count": 12642 }
548
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VJEPA 2 model configuration"""

from ...configuration_utils import PretrainedConfig


class VJEPA2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`VJEPA2Model`]. It is used to instantiate a
    VJEPA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the VJEPA2
    [facebook/vjepa2-vitl-fpc64-256](https://huggingface.co/facebook/vjepa2-vitl-fpc64-256) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        crop_size (`int`, *optional*, defaults to 256):
            Input resolution of the model.
        frames_per_clip (`int`, *optional*, defaults to 64):
            The number of frames the model has been pretrained with. Does not impact inference.
        tubelet_size (`int`, *optional*, defaults to 2):
            The number of temporal frames grouped into a single tubelet; check the paper for more information.
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers.
        in_chans (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the encoder.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            The number of hidden layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of the hidden size of the MLPs used in the encoder relative to the `hidden_size`.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention probabilities.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for attentions.
        num_pooler_layers (`int`, *optional*, defaults to 3):
            The number of self-attention layers in the pooler.
pred_hidden_size (`int`, *optional*, defaults to 384): Dimensionality of the predictor layers pred_num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Predictor pred_num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Predictor pred_num_mask_tokens (`int`, *optional*, defaults to 10): Define the number of mask tokens to use in the Predictor pred_zero_init_mask_tokens (`bool`, *optional*, defaults to `True`): Initialize the mask tokens in the predictor with 0. pred_mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of the hidden size of the MLPs used in Predictor relative to the `pred_hidden_size`. Example: ```python >>> from transformers import VJEPA2Config, VJEPA2Model >>> # Initializing a VJEPA2 vjepa2-vitl-fpc64-256 style configuration >>> configuration = VJEPA2Config() >>> # Initializing a model (with random weights) from the vjepa2-vitl-fpc64-256 style configuration >>> model = VJEPA2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "vjepa2" def __init__( self, patch_size=16, crop_size=256, frames_per_clip=64, tubelet_size=2, hidden_size=1024, in_chans=3, num_attention_heads=16, num_hidden_layers=24, drop_path_rate=0.0, mlp_ratio=4.0, layer_norm_eps=1e-6, qkv_bias=True, attention_probs_dropout_prob=0.0, hidden_act="gelu", initializer_range=0.02, attention_dropout=0.0, num_pooler_layers=3, # predictor params pred_hidden_size=384, pred_num_attention_heads=12, pred_num_hidden_layers=12, pred_num_mask_tokens=10, pred_zero_init_mask_tokens=True, pred_mlp_ratio=4.0, **kwargs, ): super().__init__(**kwargs) self.crop_size = crop_size self.frames_per_clip = frames_per_clip self.patch_size = patch_size self.tubelet_size = tubelet_size self.hidden_size = hidden_size self.in_chans = in_chans self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.drop_path_rate = drop_path_rate self.mlp_ratio = mlp_ratio self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.attention_probs_dropout_prob = attention_probs_dropout_prob self.hidden_act = hidden_act self.initializer_range = initializer_range self.image_size = crop_size self.attention_dropout = attention_dropout self.num_pooler_layers = num_pooler_layers # predictor params self.pred_hidden_size = pred_hidden_size self.pred_num_attention_heads = pred_num_attention_heads self.pred_num_hidden_layers = pred_num_hidden_layers self.pred_num_mask_tokens = pred_num_mask_tokens self.pred_zero_init_mask_tokens = pred_zero_init_mask_tokens self.pred_mlp_ratio = pred_mlp_ratio __all__ = ["VJEPA2Config"]
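Because the docstring above introduces `patch_size`, `tubelet_size`, `crop_size` and `frames_per_clip` together, a short sketch of how they combine can help. This is only back-of-the-envelope arithmetic under the usual assumption of non-overlapping spatiotemporal patchification; it is not taken from the modeling code.

```python
from transformers import VJEPA2Config

config = VJEPA2Config()

# Spatial patches per frame and temporal tubelets per clip, assuming each token
# covers a patch_size x patch_size x tubelet_size block of the input video.
patches_per_side = config.crop_size // config.patch_size          # 256 // 16 = 16
patches_per_frame = patches_per_side**2                           # 256
tubelets_per_clip = config.frames_per_clip // config.tubelet_size  # 64 // 2 = 32

print(patches_per_frame * tubelets_per_clip)  # 8192 tokens for a full 64-frame clip
```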
transformers/src/transformers/models/vjepa2/configuration_vjepa2.py/0
{ "file_path": "transformers/src/transformers/models/vjepa2/configuration_vjepa2.py", "repo_id": "transformers", "token_count": 2799 }
549
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax Wav2Vec2 model.""" from functools import partial from typing import Optional, Union import flax import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wav2vec2 import Wav2Vec2Config logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxWav2Vec2BaseModelOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2BaseModelOutput`], with potential hidden states and attentions. Args: last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. extract_features (`jnp.ndarray` of shape `(batch_size, sequence_length, last_conv_dim)`): Sequence of extracted feature vectors of the last convolutional layer of the model with `last_conv_dim` being the dimension of the last convolutional layer. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: jnp.ndarray = None extract_features: jnp.ndarray = None hidden_states: Optional[tuple[jnp.ndarray]] = None attentions: Optional[tuple[jnp.ndarray]] = None @flax.struct.dataclass class FlaxWav2Vec2ForPreTrainingOutput(ModelOutput): """ Output type of [`FlaxWav2Vec2ForPreTrainingOutput`], with potential hidden states and attentions. Args: loss (*optional*, returned when model is in train mode, `jnp.ndarray` of shape `(1,)`): Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477) . (classification) loss. 
projected_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked projected quantized states. projected_quantized_states (`jnp.ndarray` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`): Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive target vectors for contrastive loss. hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ projected_states: jnp.ndarray = None projected_quantized_states: jnp.ndarray = None codevector_perplexity: jnp.ndarray = None hidden_states: Optional[tuple[jnp.ndarray]] = None attentions: Optional[tuple[jnp.ndarray]] = None def _compute_mask_indices( shape: tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[np.ndarray] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. 
however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans """ batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and" f" `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + np.random.rand(1).item()) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) # get random indices to mask spec_aug_mask_idxs = np.array( [ np.random.choice(np.arange(sequence_length - (mask_length - 1)), num_masked_spans, replace=False) for _ in range(batch_size) ] ) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, num_masked_spans, mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, num_masked_spans * mask_length) offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, num_masked_spans, mask_length)).reshape( batch_size, num_masked_spans * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) if attention_mask is not None: # make sure padded input ids cannot be masked spec_aug_mask = np.where(attention_mask, spec_aug_mask, False) return spec_aug_mask def _sample_negative_indices(features_shape: tuple, num_negatives: int, attention_mask: Optional[np.ndarray] = None): """ Sample `num_negatives` vectors from feature vectors. """ batch_size, sequence_length, hidden_size = features_shape if sequence_length <= 1: raise ValueError( "`features should have `sequence_length` > 1, but are of shape " f"(batch_size, sequence_length, hidden_size) = ({batch_size, sequence_length, hidden_size})." ) # get `num_negatives` random vector indices from the same utterance sampled_negative_indices = [] for batch_idx in range(batch_size): high = attention_mask[batch_idx].sum() - 1 if attention_mask is not None else sequence_length - 1 sampled_indices_slice = np.random.randint(0, high, size=(num_negatives * sequence_length,)) sampled_negative_indices.append(sampled_indices_slice) sampled_negative_indices = np.asarray(sampled_negative_indices, dtype=np.int32) # generate indices of the positive vectors themselves, repeat them `num_negatives` times feature_indices = np.broadcast_to(np.arange(sequence_length)[:, None], (sequence_length, num_negatives)).flatten() # avoid sampling the same positive vector, but keep the distribution uniform sampled_negative_indices[sampled_negative_indices >= feature_indices] += 1 # correct for batch size for batch_idx in range(1, batch_size): sampled_negative_indices[batch_idx] += batch_idx * sequence_length return sampled_negative_indices WAV2VEC2_START_DOCSTRING = r""" Wav2Vec2 was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://huggingface.co/papers/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. This model inherits from [`FlaxPreTrainedModel`]. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`Wav2Vec2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ WAV2VEC2_INPUTS_DOCSTRING = r""" Args: input_values (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `jnp.ndarray`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) .. warning:: `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. mask_time_indices (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. 
When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWav2Vec2LayerNormConvLayer(nn.Module): config: Wav2Vec2Config layer_id: int = 0 dtype: jnp.dtype = jnp.float32 def setup(self): self.in_conv_dim = self.config.conv_dim[self.layer_id] if self.layer_id > 0 else 1 self.out_conv_dim = self.config.conv_dim[self.layer_id] self.conv = nn.Conv( features=self.config.conv_dim[self.layer_id], kernel_size=(self.config.conv_kernel[self.layer_id],), strides=(self.config.conv_stride[self.layer_id],), use_bias=self.config.conv_bias, kernel_init=jax.nn.initializers.he_normal(), padding="VALID", dtype=self.dtype, ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxConvWithWeightNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=self.config.hidden_size, kernel_size=(self.config.num_conv_pos_embeddings,), kernel_init=jax.nn.initializers.he_normal(), padding="VALID", feature_group_count=self.config.num_conv_pos_embedding_groups, dtype=self.dtype, ) weight_shape = ( self.conv.features, self.conv.features // self.conv.feature_group_count, self.conv.kernel_size[0], ) self.weight_v = self.param("weight_v", jax.nn.initializers.he_normal(), weight_shape) self.weight_g = self.param("weight_g", lambda _: jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :]) self.bias = self.param("bias", jax.nn.initializers.zeros, (self.conv.features,)) self.prev_padding = self.conv.kernel_size[0] // 2 def _get_normed_weights(self): weight_v_norm = jnp.linalg.norm(self.weight_v, axis=(0, 1))[None, None, :] normed_weight_v = jnp.divide(self.weight_v, weight_v_norm) normed_kernel = jnp.multiply(normed_weight_v, self.weight_g) return normed_kernel def __call__(self, hidden_states): kernel = self._get_normed_weights() hidden_states = jnp.pad(hidden_states, ((0, 0), (self.prev_padding, self.prev_padding), (0, 0))) hidden_states = self.conv.apply({"params": {"kernel": kernel.T, "bias": self.bias}}, hidden_states) return hidden_states class FlaxWav2Vec2PositionalConvEmbedding(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = FlaxConvWithWeightNorm(self.config, dtype=self.dtype) self.activation = ACT2FN[self.config.feat_extract_activation] self.num_pad_remove = 1 if self.config.num_conv_pos_embeddings % 2 == 0 else 0 def __call__(self, hidden_states): hidden_states = hidden_states.transpose((0, 1, 2)) hidden_states = self.conv(hidden_states) if self.num_pad_remove > 0: hidden_states = hidden_states[:, : -self.num_pad_remove, :] hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose((0, 1, 2)) return hidden_states class FlaxConvLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = 
jnp.float32 def setup(self): if self.config.feat_extract_norm == "layer": self.layers = [ FlaxWav2Vec2LayerNormConvLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_feat_extract_layers) ] elif self.config.feat_extract_norm == "group": raise NotImplementedError("At the moment only ``config.feat_extact_norm == 'layer'`` is supported") else: raise ValueError( f"`config.feat_extract_norm` is {self.config.feat_extract_norm}, but has to be one of ['group'," " 'layer']" ) def __call__(self, hidden_states): for i, conv_layer in enumerate(self.layers): hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2FeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv_layers = FlaxConvLayersCollection(self.config, dtype=self.dtype) def __call__(self, input_values, freeze_feature_encoder=False): hidden_states = input_values[:, :, None] hidden_states = self.conv_layers(hidden_states) if freeze_feature_encoder: hidden_states = jax.lax.stop_gradient(hidden_states) return hidden_states class FlaxWav2Vec2FeatureProjection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.projection = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.feat_proj_dropout) def __call__(self, hidden_states, deterministic=True): norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states, norm_hidden_states class FlaxWav2Vec2Attention(nn.Module): config: Wav2Vec2Config embed_dim: int num_heads: int dropout: float = 0.0 bias: bool = True dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, use_bias=self.bias, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense() self.out_proj = dense() self.dropout_layer = nn.Dropout(rate=self.dropout) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, ) -> tuple[jnp.ndarray]: """Input shape: Batch x Time x Channel""" # get query proj query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # Convert the boolean attention mask to an attention bias. 
if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights class FlaxWav2Vec2FeedForward(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.intermediate_dropout = nn.Dropout(rate=self.config.activation_dropout) self.intermediate_dense = nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) if isinstance(self.config.hidden_act, str): self.intermediate_act_fn = ACT2FN[self.config.hidden_act] else: self.intermediate_act_fn = self.config.hidden_act self.output_dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.output_dropout = nn.Dropout(rate=self.config.hidden_dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states, deterministic=deterministic) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states, deterministic=deterministic) return hidden_states class FlaxWav2Vec2EncoderLayerStableLayerNorm(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.attention = FlaxWav2Vec2Attention( config=self.config, embed_dim=self.config.hidden_size, num_heads=self.config.num_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.feed_forward = FlaxWav2Vec2FeedForward(self.config, dtype=self.dtype) self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__(self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights = self.attention( hidden_states, attention_mask=attention_mask, deterministic=deterministic ) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward( self.final_layer_norm(hidden_states), deterministic=deterministic ) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWav2Vec2EncoderLayerStableLayerNormCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2EncoderLayerStableLayerNorm(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask=None, 
deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class FlaxWav2Vec2StableLayerNormEncoder(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.pos_conv_embed = FlaxWav2Vec2PositionalConvEmbedding(self.config, dtype=self.dtype) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout) self.layers = FlaxWav2Vec2EncoderLayerStableLayerNormCollection(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask=None, deterministic=True, output_attentions=False, output_hidden_states=False, return_dict=True, ): if attention_mask is not None: # make sure padded tokens are not attended to hidden_states = jnp.where( jnp.broadcast_to(attention_mask[:, :, None], hidden_states.shape), hidden_states, 0 ) position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = self.layer_norm(outputs[0]) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_state,) if not return_dict: outputs = (last_hidden_state, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_state, hidden_states=hidden_states, attentions=outputs.attentions ) class FlaxWav2Vec2GumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information. 
""" config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.num_groups = self.config.num_codevector_groups self.num_vars = self.config.num_codevectors_per_group if self.config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {self.config.codevector_dim} must be divisible by" f" `config.num_codevector_groups` {self.num_groups} for concatenation" ) # storage for codebook variables (codewords) self.codevectors = self.param( "codevectors", jax.nn.initializers.uniform(), (1, self.num_groups * self.num_vars, self.config.codevector_dim // self.num_groups), ) self.weight_proj = nn.Dense( self.num_groups * self.num_vars, kernel_init=jax.nn.initializers.normal(1.0), dtype=self.dtype, ) @staticmethod def _compute_perplexity(probs, mask=None): if mask is not None: mask_extended = jnp.broadcast_to(mask.flatten()[:, None, None], probs.shape) probs = jnp.where(mask_extended, probs, jnp.zeros_like(probs)) marginal_probs = probs.sum(axis=0) / mask.sum() else: marginal_probs = probs.mean(axis=0) perplexity = jnp.exp(-jnp.sum(marginal_probs * jnp.log(marginal_probs + 1e-7), axis=-1)).sum() return perplexity def __call__(self, hidden_states, mask_time_indices=None, deterministic=True, temperature=1): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.reshape(batch_size * sequence_length * self.num_groups, -1) if not deterministic: # sample code vector probs via gumbel in differentiateable way gumbel_rng = self.make_rng("gumbel") gumbels = jax.random.gumbel(gumbel_rng, hidden_states.shape) codevector_probs = nn.softmax((hidden_states + gumbels) / temperature) # compute perplexity codevector_soft_dist = nn.softmax( hidden_states.reshape(batch_size * sequence_length, self.num_groups, -1), axis=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(axis=-1) codevector_probs = jax.nn.one_hot(codevector_idx, hidden_states.shape[-1]) * 1.0 codevector_probs = codevector_probs.reshape(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs, mask_time_indices) codevector_probs = codevector_probs.reshape(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = jnp.expand_dims(codevector_probs, axis=-1) * self.codevectors codevectors = codevectors_per_group.reshape(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).reshape(batch_size, sequence_length, -1) return codevectors, perplexity class FlaxWav2Vec2Adapter(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): # hidden_states require down-projection if feature dims don't match if self.config.output_hidden_size != self.config.hidden_size: self.proj = nn.Dense( self.config.output_hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.proj_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) else: self.proj = self.proj_layer_norm = None self.layers = FlaxWav2Vec2AdapterLayersCollection(self.config, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): # down-project hidden_states if required if self.proj is not None and self.proj_layer_norm is not None: hidden_states = 
self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = self.layers(hidden_states) return hidden_states class FlaxWav2Vec2AdapterLayer(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( features=2 * self.config.output_hidden_size, kernel_size=(self.config.adapter_kernel_size,), strides=(self.config.adapter_stride,), padding=((1, 1),), kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = nn.glu(hidden_states, axis=2) return hidden_states class FlaxWav2Vec2AdapterLayersCollection(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.layers = [ FlaxWav2Vec2AdapterLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_adapter_layers) ] def __call__(self, hidden_states): for conv_layer in self.layers: hidden_states = conv_layer(hidden_states) return hidden_states class FlaxWav2Vec2PreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = Wav2Vec2Config base_model_prefix: str = "wav2vec2" main_input_name = "input_values" module_class: nn.Module = None def __init__( self, config: Wav2Vec2Config, input_shape: tuple = (1, 1024), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_values = jnp.zeros(input_shape, dtype="i4") attention_mask = jnp.ones_like(input_values) params_rng, dropout_rng = jax.random.split(rng, 2) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init(rngs, input_values, attention_mask, return_dict=False)["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WAV2VEC2_INPUTS_DOCSTRING) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, not train, 
output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): return self.module._get_feat_extract_output_lengths(input_lengths, add_adapter=add_adapter) class FlaxWav2Vec2Module(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.feature_extractor = FlaxWav2Vec2FeatureEncoder(self.config, dtype=self.dtype) self.feature_projection = FlaxWav2Vec2FeatureProjection(self.config, dtype=self.dtype) self.masked_spec_embed = self.param( "masked_spec_embed", jax.nn.initializers.uniform(), (self.config.hidden_size,) ) if self.config.do_stable_layer_norm: self.encoder = FlaxWav2Vec2StableLayerNormEncoder(self.config, dtype=self.dtype) else: raise NotImplementedError("``config.do_stable_layer_norm is False`` is currently not supported.") self.adapter = FlaxWav2Vec2Adapter(self.config, dtype=self.dtype) if self.config.add_adapter else None def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): extract_features = self.feature_extractor(input_values, freeze_feature_encoder=freeze_feature_encoder) # make sure that no loss is computed on padded inputs if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features, deterministic=deterministic) if mask_time_indices is not None: # apply SpecAugment along time axis with given indices hidden_states = jnp.where( jnp.broadcast_to(mask_time_indices[:, :, None], hidden_states.shape), jnp.broadcast_to(self.masked_spec_embed[None, None, :], hidden_states.shape), hidden_states, ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return FlaxWav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: jnp.ndarray, add_adapter=None ): # Effectively attention_mask.sum(-1), 
but not in-place, so it can run # in inference mode. non_padded_lengths = attention_mask.cumsum(axis=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) batch_size = attention_mask.shape[0] attention_mask = jnp.zeros((batch_size, feature_vector_length), dtype=attention_mask.dtype) # these two operations make sure that all values # before the output length indices are attended to attention_mask = attention_mask.at[jnp.arange(attention_mask.shape[0]), output_lengths - 1].set(1) attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") return attention_mask @add_start_docstrings( "The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.", WAV2VEC2_START_DOCSTRING, ) class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2Module FLAX_WAV2VEC2_MODEL_DOCSTRING = """ Returns: Example: ```python >>> from transformers import AutoProcessor, FlaxWav2Vec2Model >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2Model.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(example): ... example["speech"] = example["audio"]["array"] ... return example >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ``` """ overwrite_call_docstring( FlaxWav2Vec2Model, WAV2VEC2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_MODEL_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2Model, output_type=FlaxWav2Vec2BaseModelOutput, config_class=Wav2Vec2Config ) class FlaxWav2Vec2ForCTCModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.final_dropout) self.lm_head = nn.Dense( self.config.vocab_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, deterministic=True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): outputs = self.wav2vec2( input_values, attention_mask=attention_mask, mask_time_indices=mask_time_indices, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.lm_head(hidden_states) if not return_dict: return (logits,) + outputs[2:] return FlaxCausalLMOutput(logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None, ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in
zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings( "Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).", WAV2VEC2_START_DOCSTRING, ) class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForCTCModule FLAX_WAV2VEC2_FOR_CTC_DOCSTRING = """ Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoProcessor, FlaxWav2Vec2ForCTC >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> model = FlaxWav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60") >>> def map_to_array(example): ... example["speech"] = example["audio"]["array"] ... return example >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor( ... ds["speech"][0], sampling_rate=16_000, return_tensors="np" ... ).input_values # Batch size 1 >>> logits = model(input_values).logits >>> predicted_ids = jnp.argmax(logits, axis=-1) >>> transcription = processor.decode(predicted_ids[0]) >>> # should give: "A MAN SAID TO THE UNIVERSE SIR I EXIST" ``` """ overwrite_call_docstring( FlaxWav2Vec2ForCTC, WAV2VEC2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_CTC_DOCSTRING, ) append_replace_return_docstrings(FlaxWav2Vec2ForCTC, output_type=FlaxCausalLMOutput, config_class=Wav2Vec2Config) class FlaxWav2Vec2ForPreTrainingModule(nn.Module): config: Wav2Vec2Config dtype: jnp.dtype = jnp.float32 def setup(self): self.wav2vec2 = FlaxWav2Vec2Module(self.config, dtype=self.dtype) self.dropout_features = nn.Dropout(self.config.feat_quantizer_dropout) self.quantizer = FlaxWav2Vec2GumbelVectorQuantizer(self.config, dtype=self.dtype) self.project_q = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.project_hid = nn.Dense( self.config.proj_codevector_dim, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, deterministic: bool = True, output_attentions=None, output_hidden_states=None, freeze_feature_encoder=False, return_dict=None, ): r""" Returns: Example: ```python ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wav2vec2( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, mask_time_indices=mask_time_indices, deterministic=deterministic, freeze_feature_encoder=freeze_feature_encoder, return_dict=return_dict, ) # project all transformed features (including masked) to final vq dim transformer_features = self.project_hid(outputs[0]) # quantize all (unmasked) extracted features and project to final vq dim extract_features = self.dropout_features(outputs[1], deterministic=deterministic) quantized_features, codevector_perplexity = self.quantizer( extract_features, mask_time_indices, deterministic=deterministic, temperature=gumbel_temperature ) quantized_features = self.project_q(quantized_features) if not return_dict: return (transformer_features, quantized_features, 
codevector_perplexity) + outputs[2:] return FlaxWav2Vec2ForPreTrainingOutput( projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _get_feat_extract_output_lengths( self, input_lengths: Union[jnp.ndarray, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths @add_start_docstrings("""Wav2Vec2 Model with a quantizer and `VQ` head on top.""", WAV2VEC2_START_DOCSTRING) class FlaxWav2Vec2ForPreTraining(FlaxWav2Vec2PreTrainedModel): module_class = FlaxWav2Vec2ForPreTrainingModule @add_start_docstrings_to_model_forward(WAV2VEC2_INPUTS_DOCSTRING) # overwrite since has `gumbel_temperature` input def __call__( self, input_values, attention_mask=None, mask_time_indices=None, gumbel_temperature: int = 1, params: Optional[dict] = None, dropout_rng: jax.random.PRNGKey = None, gumbel_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, freeze_feature_encoder: bool = False, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict batch_size, sequence_length = input_values.shape if attention_mask is None: attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if gumbel_rng is not None: rngs["gumbel"] = gumbel_rng inputs = {"params": params or self.params} return self.module.apply( inputs, jnp.array(input_values, dtype="f4"), jnp.array(attention_mask, dtype="i4"), mask_time_indices, gumbel_temperature, not train, output_attentions, output_hidden_states, freeze_feature_encoder, return_dict, rngs=rngs, ) FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING = """ Returns: Example: ```python >>> import optax >>> import numpy as np >>> import jax.numpy as jnp >>> from transformers import AutoFeatureExtractor, FlaxWav2Vec2ForPreTraining >>> from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-large-lv60") >>> model = FlaxWav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-large-lv60") >>> def map_to_array(example): ... example["speech"] = example["audio"]["array"] ... 
return example >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = feature_extractor(ds["speech"][0], return_tensors="np").input_values # Batch size 1 >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) >>> outputs = model(input_values, mask_time_indices=mask_time_indices) >>> # compute cosine similarity between predicted (=projected_states) and target (=projected_quantized_states) >>> cosine_sim = optax.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states) >>> # show that cosine similarity is much higher than random >>> assert np.asarray(cosine_sim)[mask_time_indices].mean() > 0.5 ``` """ overwrite_call_docstring( FlaxWav2Vec2ForPreTraining, WAV2VEC2_INPUTS_DOCSTRING + FLAX_WAV2VEC2_FOR_PRETRAINING_DOCSTRING, ) append_replace_return_docstrings( FlaxWav2Vec2ForPreTraining, output_type=FlaxWav2Vec2ForPreTrainingOutput, config_class=Wav2Vec2Config ) __all__ = ["FlaxWav2Vec2ForCTC", "FlaxWav2Vec2ForPreTraining", "FlaxWav2Vec2Model", "FlaxWav2Vec2PreTrainedModel"]
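# --- Illustrative sketch (not part of the original module) ---
# The helpers above repeatedly use the 1D-conv output-length formula
# `(input_length - kernel_size) // stride + 1` and the flip/cumsum/flip trick to build
# the reduced attention mask over extracted feature frames. The snippet below walks
# through both on toy inputs; the kernel/stride values are assumed to be the usual
# wav2vec2 base defaults and are only an example here, not read from any config.
import jax.numpy as jnp

_EXAMPLE_CONV_KERNEL = (10, 3, 3, 3, 3, 2, 2)
_EXAMPLE_CONV_STRIDE = (5, 2, 2, 2, 2, 2, 2)


def _example_feat_extract_output_length(input_length: int) -> int:
    # apply the per-layer conv output-length formula, layer by layer
    for kernel_size, stride in zip(_EXAMPLE_CONV_KERNEL, _EXAMPLE_CONV_STRIDE):
        input_length = (input_length - kernel_size) // stride + 1
    return input_length


if __name__ == "__main__":
    # two utterances: 1.0 s and 0.5 s of 16 kHz audio -> 49 and 24 feature frames
    raw_lengths = [16000, 8000]
    output_lengths = jnp.array([_example_feat_extract_output_length(length) for length in raw_lengths])

    feature_vector_length = int(output_lengths.max())
    mask = jnp.zeros((len(raw_lengths), feature_vector_length), dtype="i4")
    # put a 1 at the last valid frame, then flip/cumsum/flip so every earlier frame is attended to
    mask = mask.at[jnp.arange(len(raw_lengths)), output_lengths - 1].set(1)
    mask = jnp.flip(jnp.flip(mask, -1).cumsum(-1), -1).astype(bool)

    print(output_lengths)  # [49 24]
    print(mask.sum(-1))    # [49 24] valid feature frames per utterance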
transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py/0
{ "file_path": "transformers/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py", "repo_id": "transformers", "token_count": 24356 }
550
# coding=utf-8 # Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Flax whisper model.""" import math import random from functools import partial from typing import Optional import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from ...generation.flax_logits_process import FlaxWhisperTimeStampLogitsProcessor from ...modeling_flax_outputs import ( FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput, FlaxSequenceClassifierOutput, ) from ...modeling_flax_utils import ( ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring, ) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "openai/whisper-tiny" _CONFIG_FOR_DOC = "WhisperConfig" remat = nn_partitioning.remat def sinusoidal_embedding_init(key, shape, dtype=jnp.float_) -> jax.Array: """Returns sinusoids for positional embedding""" length, channels = shape if channels % 2 != 0: raise ValueError( f"Number of channels has to be divisible by 2 for sinusoidal positional embeddings, got {channels} channels." ) log_timescale_increment = math.log(10000) / (channels // 2 - 1) inv_timescales = jnp.exp(-log_timescale_increment * jnp.arange(channels // 2)) scaled_time = jnp.arange(length).reshape(-1, 1) * inv_timescales.reshape(1, -1) return jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1).astype(dtype) WHISPER_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a Flax Linen [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`WhisperConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`. **Note that this only specifies the dtype of the computation and does not influence the dtype of model parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and [`~FlaxPreTrainedModel.to_bf16`]. """ WHISPER_INPUTS_DOCSTRING = r""" Args: input_features (`numpy.ndarray` of shape `(batch_size, feature_size, sequence_length)`): Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the features, padding and conversion into a tensor of type `numpy.ndarray`. See [`~WhisperFeatureExtractor.__call__`] attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but is not used. By default the silence in the input log mel spectrogram are ignored. decoder_input_ids (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. decoder_attention_mask (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Whisper does not use `position_ids` in the encoder as `input_features` is always the same size and doesn't use masking, but this argument is preserved for compatibility. By default the silence in the input log mel spectrogram are ignored. 
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ WHISPER_ENCODE_INPUTS_DOCSTRING = r""" Args: input_features (`numpy.ndarray` of shape `(batch_size, feature_size, sequence_length)`): Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `numpy.ndarray`. See [`~WhisperFeatureExtractor.__call__`]. attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but is not used. By default the silence in the input log mel spectrogram are ignored. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ WHISPER_DECODE_INPUTS_DOCSTRING = r""" Args: decoder_input_ids (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) encoder_outputs (`tuple(tuple(numpy.ndarray)`): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but it is not used. By default the silence in the input log mel spectrogram are ignored. decoder_attention_mask (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. 
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. past_key_values (`dict[str, numpy.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class FlaxWhisperAttention(nn.Module): config: WhisperConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {self.num_heads})." ) dense = partial( nn.Dense, self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.q_proj = dense(use_bias=self.bias) self.k_proj = dense(use_bias=False) self.v_proj = dense(use_bias=self.bias) self.out_proj = dense(use_bias=self.bias) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_target_positions), dtype="bool"), dtype="bool" ) def __call__( self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray] = None, attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, ) -> tuple[jnp.ndarray]: is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.q_proj(hidden_states) if is_cross_attention: key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length), ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # 
and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return attn_output, attn_weights def _split_heads(self, hidden_state) -> jnp.ndarray: return hidden_state.reshape(hidden_state.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_state) -> jnp.ndarray: return hidden_state.reshape(hidden_state.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask) -> tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]: # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only # attend to those key positions that have already been generated and cached, not the # remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask # Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Whisper class FlaxWhisperEncoderLayer(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxWhisperAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = nn.Dense( self.config.encoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool = True, deterministic: bool = True, ) -> tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class FlaxWhisperEncoderLayerCollection(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxWhisperEncoderCheckpointLayer = remat(FlaxWhisperEncoderLayer, static_argnums=(2, 3)) self.layers = [ FlaxWhisperEncoderCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] else: self.layers = [ FlaxWhisperEncoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.encoder_layers) ] self.layerdrop = self.config.encoder_layerdrop def __call__( self, hidden_states, attention_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for encoder_layer in self.layers: if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: layer_outputs = encoder_layer( 
hidden_states, attention_mask, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Whisper class FlaxWhisperDecoderLayer(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxWhisperAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.encoder_attn = FlaxWhisperAttention( config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, ) self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) self.fc1 = nn.Dense( self.config.decoder_ffn_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) self.fc2 = nn.Dense( self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std) ) self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = True, deterministic: bool = True, ) -> tuple[jnp.ndarray]: residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, ) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs class FlaxWhisperDecoderLayerCollection(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 # 
the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxWhisperDecoderCheckpointLayer = remat(FlaxWhisperDecoderLayer, static_argnums=(4, 5, 6)) self.layers = [ FlaxWhisperDecoderCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] else: self.layers = [ FlaxWhisperDecoderLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.decoder_layers) ] self.layerdrop = self.config.decoder_layerdrop def __call__( self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, deterministic: bool = True, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = random.uniform(0, 1) if not deterministic and (dropout_probability < self.layerdrop): layer_outputs = (None, None, None) else: layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, init_cache, output_attentions, deterministic, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) class FlaxWhisperEncoder(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self) -> None: self.conv1 = nn.Conv( self.config.d_model, kernel_size=(3,), padding=1, kernel_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype, ) self.conv2 = nn.Conv( self.config.d_model, kernel_size=(3,), strides=2, padding=1, kernel_init=jax.nn.initializers.normal(self.config.init_std), dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.layers = FlaxWhisperEncoderLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.embed_positions = nn.Embed( self.config.max_source_positions, self.config.d_model, dtype=self.dtype, embedding_init=sinusoidal_embedding_init, ) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05) def __call__( self, input_features: jnp.ndarray, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ) -> tuple[jnp.ndarray]: if input_features.shape[1:] != (self.config.num_mel_bins, self.config.max_source_positions * 2): raise ValueError( "input_features.shape[1:], must be equal to (self.config.num_mel_bins," f" self.config.max_source_positions * 2) (got {input_features.shape[1:]}, but should be" f" ({self.config.num_mel_bins}, 
{self.config.max_source_positions * 2}))" ) input_features = input_features.transpose(0, 2, 1) hidden_states = jax.nn.gelu(self.conv1(input_features), approximate=False) hidden_states = jax.nn.gelu(self.conv2(hidden_states), approximate=False) embed_positions = self.embed_positions(jnp.arange(self.config.max_source_positions)) # freeze the sinusoidal embeddings by stopping the back-prop embed_positions = jax.lax.stop_gradient(embed_positions) hidden_states = hidden_states + embed_positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask=None, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutput( last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, ) class FlaxWhisperDecoder(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self) -> None: self.embed_tokens = nn.Embed(self.config.vocab_size, self.config.d_model, dtype=self.dtype) self.embed_positions = nn.Embed(self.config.max_target_positions, self.config.d_model, dtype=self.dtype) self.layers = FlaxWhisperDecoderLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-5) def __call__( self, input_ids: jnp.ndarray, attention_mask: jnp.ndarray, position_ids: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray] = None, init_cache: bool = False, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ) -> tuple[jnp.ndarray]: input_embeds = self.embed_tokens(input_ids) position_embeds = self.embed_positions(position_ids) hidden_states = input_embeds + position_embeds hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) # update the last element in `hidden_states` after applying `layernorm` above hidden_states = None if output_hidden_states: hidden_states = outputs[1] hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) class FlaxWhisperModule(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 
gradient_checkpointing: bool = False def setup(self) -> None: self.encoder = FlaxWhisperEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.decoder = FlaxWhisperDecoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) def __call__( self, input_features: jnp.ndarray, decoder_input_ids: jnp.ndarray, decoder_attention_mask: jnp.ndarray, decoder_position_ids: jnp.ndarray, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): encoder_outputs = self.encoder( input_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder class FlaxWhisperPreTrainedModel(FlaxPreTrainedModel): config_class = WhisperConfig base_model_prefix: str = "model" main_input_name = "input_features" module_class: nn.Module = None def __init__( self, config: WhisperConfig, input_shape: Optional[tuple[int]] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) if input_shape is None: input_shape = (1, config.num_mel_bins, 2 * config.max_source_positions) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_features = jnp.zeros(input_shape, dtype="f4") input_features = input_features.at[(..., -1)].set(self.config.eos_token_id) decoder_input_ids = jnp.zeros((input_shape[0], 1), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] 
self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel.init_cache with Bart->Whisper def init_cache(self, batch_size, max_length, encoder_outputs): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`): `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. """ # init input variables to retrieve cache decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4") decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to( jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape ) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs, ) init_variables = self.module.init( jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward, # we only need to call the decoder to init the cache ) return unfreeze(init_variables["cache"]) @add_start_docstrings(WHISPER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=WhisperConfig) def encode( self, input_features: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, **kwargs, ): r""" Returns: Example: ```python >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np") >>> input_features = inputs.input_features >>> encoder_outputs = model.encode(input_features=input_features) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _encoder_forward(module, input_features, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_features, **kwargs) return self.module.apply( {"params": params or self.params}, input_features=jnp.array(input_features, 
dtype="f4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward, ) @add_start_docstrings(WHISPER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=WhisperConfig) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: Optional[dict] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration >>> from datasets import load_dataset >>> import jax.numpy as jnp >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> input_features = processor(ds[0]["audio"]["array"], return_tensors="np").input_features >>> encoder_outputs = model.encode(input_features=input_features) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((input_features.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] batch_size, sequence_length = decoder_input_ids.shape if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") if decoder_attention_mask is not None: decoder_position_ids = (decoder_attention_mask.cumsum(-1) * decoder_attention_mask) - 1 else: decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxWhisperAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs, ) outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past = outputs outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past = outputs outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) def __call__( self, input_features: jnp.ndarray, decoder_input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # prepare decoder inputs if decoder_position_ids is None: if decoder_attention_mask is not None: decoder_position_ids = (decoder_attention_mask.cumsum(-1) * decoder_attention_mask) - 1 else: batch_size, sequence_length = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) # Handle any PRNG if needed rngs = {"dropout": dropout_rng} if dropout_rng is not None else {} return self.module.apply( {"params": params or self.params}, input_features=jnp.array(input_features, dtype="f4"), decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, ) @add_start_docstrings( "The bare Whisper Model transformer outputting raw hidden-states without any specific head on top.", WHISPER_START_DOCSTRING, ) class FlaxWhisperModel(FlaxWhisperPreTrainedModel): config: WhisperConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation module_class = FlaxWhisperModule append_call_sample_docstring(FlaxWhisperModel, _CHECKPOINT_FOR_DOC, 
FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) class FlaxWhisperForConditionalGenerationModule(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self) -> None: self.model = FlaxWhisperModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.lm_head = nn.Dense( self.config.vocab_size, use_bias=False, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std), ) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__( self, input_features, decoder_input_ids, decoder_attention_mask: jnp.ndarray = None, decoder_position_ids: jnp.ndarray = None, position_ids: jnp.ndarray = None, attention_mask: jnp.ndarray = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, deterministic: bool = True, ): outputs = self.model( input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.decoder.embed_tokens.variables["params"]["embedding"] lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput( logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings("The Whisper Model with a language modeling head.", WHISPER_START_DOCSTRING) class FlaxWhisperForConditionalGeneration(FlaxWhisperPreTrainedModel): module_class = FlaxWhisperForConditionalGenerationModule dtype: jnp.dtype = jnp.float32 @add_start_docstrings(WHISPER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=WhisperConfig) def decode( self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray] = None, decoder_attention_mask: Optional[jnp.ndarray] = None, decoder_position_ids: Optional[jnp.ndarray] = None, past_key_values: Optional[dict] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, ): r""" Returns: Example: ```python >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np") >>> input_features = inputs.input_features >>> encoder_outputs = model.encode(input_features=input_features) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = 
jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] batch_size, sequence_length = decoder_input_ids.shape if decoder_position_ids is None: if past_key_values is not None: raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.") if decoder_attention_mask is not None: decoder_position_ids = (decoder_attention_mask.cumsum(-1) * decoder_attention_mask) - 1 else: decoder_position_ids = jnp.broadcast_to( jnp.arange(sequence_length)[None, :], (batch_size, sequence_length) ) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length), dtype="i4") # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} # if past_key_values are passed then cache is already initialized a private flag init_cache has to be # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that # it can be changed by FlaxWhisperAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.decoder.embed_tokens.variables["params"]["embedding"] lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) return lm_logits, outputs outputs = self.module.apply( inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"), decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"), encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward, ) if past_key_values is None: lm_logits, decoder_outputs = outputs else: (lm_logits, decoder_outputs), past = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions( logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, ) else: outputs = (lm_logits,) + decoder_outputs[1:] # add updated cache to model output if past_key_values is not None and return_dict: outputs["past_key_values"] = unfreeze(past["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] return outputs def generate( self, input_features, generation_config=None, logits_processor=None, return_timestamps=None, task=None, language=None, is_multilingual=None, **kwargs, ): if generation_config 
is None: generation_config = self.generation_config if return_timestamps is not None: generation_config.return_timestamps = return_timestamps if task is not None: generation_config.task = task if is_multilingual is not None: generation_config.is_multilingual = is_multilingual if language is not None: generation_config.language = language if kwargs is not None and "decoder_input_ids" in kwargs: decoder_input_length = len(kwargs["decoder_input_ids"]) else: decoder_input_length = 1 forced_decoder_ids = [] if hasattr(generation_config, "is_multilingual") and generation_config.is_multilingual: if hasattr(generation_config, "language"): forced_decoder_ids.append((1, generation_config.lang_to_id[generation_config.language])) else: forced_decoder_ids.append((1, None)) if hasattr(generation_config, "task"): forced_decoder_ids.append((2, generation_config.task_to_id[generation_config.task])) else: forced_decoder_ids.append((2, generation_config.task_to_id["transcribe"])) if ( hasattr(generation_config, "return_timestamps") and generation_config.return_timestamps ) or return_timestamps: logits_processor = [ FlaxWhisperTimeStampLogitsProcessor(generation_config, self.config, decoder_input_length) ] else: if forced_decoder_ids and forced_decoder_ids[-1][0] != generation_config.no_timestamps_token_id: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) if len(forced_decoder_ids) > 0: generation_config.forced_decoder_ids = forced_decoder_ids return super().generate( input_features, generation_config, logits_processor=logits_processor, **kwargs, ) def prepare_inputs_for_generation( self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array] = None, decoder_attention_mask: Optional[jax.Array] = None, encoder_outputs=None, **kwargs, ): # initializing the cache batch_size, seq_length = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyways. 
# Thus we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "encoder_attention_mask": attention_mask, "decoder_attention_mask": extended_attention_mask, "decoder_position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1 return model_kwargs FLAX_WHISPER_CONDITIONAL_GENERATION_DOCSTRING = r""" Returns: Transcription example: ```python >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration >>> from datasets import load_dataset >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np") >>> input_features = inputs.input_features >>> generated_ids = model.generate(input_ids=input_features) >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> transcription ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.' 
``` """ overwrite_call_docstring( FlaxWhisperForConditionalGeneration, WHISPER_INPUTS_DOCSTRING + FLAX_WHISPER_CONDITIONAL_GENERATION_DOCSTRING ) append_replace_return_docstrings( FlaxWhisperForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC ) class FlaxWhisperForAudioClassificationModule(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self) -> None: self.encoder = FlaxWhisperEncoder( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.config.is_encoder_decoder = False num_layers = self.config.num_hidden_layers + 1 if self.config.use_weighted_layer_sum: self.layer_weights = jnp.repeat(1 / num_layers, num_layers) self.projector = nn.Dense(self.config.classifier_proj_size, dtype=self.dtype) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_features, encoder_outputs=None, output_attentions=None, output_hidden_states: bool = True, return_dict: bool = True, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = jnp.stack(encoder_outputs, axis=1) norm_weights = jax.nn.softmax(self.layer_weights, axis=-1) hidden_states = jnp.sum(hidden_states * jnp.reshape(norm_weights, [-1, 1, 1]), axis=1) else: hidden_states = encoder_outputs[0] hidden_states = self.projector(hidden_states) pooled_output = jnp.mean(hidden_states, axis=1) logits = self.classifier(pooled_output) if not return_dict: return (logits,) + encoder_outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings("The Whisper Model with an audio classification head on top.", WHISPER_START_DOCSTRING) class FlaxWhisperForAudioClassification(FlaxWhisperPreTrainedModel): module_class = FlaxWhisperForAudioClassificationModule dtype: jnp.dtype = jnp.float32 def init_weights(self, rng: jax.random.PRNGKey, input_shape: tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_features = jnp.zeros(input_shape, dtype="f4") input_features = input_features.at[(..., -1)].set(self.config.eos_token_id) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} random_params = self.module.init( rngs, input_features=input_features, )["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) def __call__( self, input_features: jnp.ndarray, attention_mask: Optional[jnp.ndarray] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, train: bool = False, params: Optional[dict] = None, dropout_rng: PRNGKey = None, **kwargs, ): 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, input_features=jnp.array(input_features, dtype="f4"), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) FLAX_WHISPER_AUDIO_CLASSIFICATION_DOCSTRING = r""" Returns: Transcription example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoFeatureExtractor, FlaxWhisperForAudioClassification >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id") >>> model = FlaxWhisperForAudioClassification.from_pretrained( ... "sanchit-gandhi/whisper-medium-fleurs-lang-id", from_pt=True ... ) >>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True) >>> sample = next(iter(ds)) >>> inputs = feature_extractor( ... sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="np" ... ) >>> input_features = inputs.input_features >>> logits = model(input_features).logits >>> predicted_class_ids = jnp.argmax(logits).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label 'af_za' ``` """ overwrite_call_docstring( FlaxWhisperForAudioClassification, WHISPER_INPUTS_DOCSTRING + FLAX_WHISPER_AUDIO_CLASSIFICATION_DOCSTRING ) append_replace_return_docstrings( FlaxWhisperForAudioClassification, output_type=FlaxSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC ) __all__ = [ "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperPreTrainedModel", "FlaxWhisperForAudioClassification", ]
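For quick reference, here is a minimal end-to-end transcription sketch that ties the pieces above together: `generate()` drives `encode()`, `init_cache()`, and `decode()` under the hood. It is adapted from the docstring examples in this module (same checkpoint and dummy dataset); it assumes the Flax/JAX extras of `transformers` and the `datasets` library are installed, and it loads the PyTorch checkpoint with `from_pt=True` as the examples above do. Note that `generate()` takes `input_features` (not `input_ids`) as its first argument.

```python
from datasets import load_dataset
from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)

# Load one dummy validation sample and convert the raw audio to log-mel input features
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
input_features = processor(ds[0]["audio"]["array"], return_tensors="np").input_features

# Auto-regressive decoding; the returned object carries the generated token ids in `.sequences`
generated = model.generate(input_features)
transcription = processor.batch_decode(generated.sequences, skip_special_tokens=True)[0]
print(transcription)
```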
transformers/src/transformers/models/whisper/modeling_flax_whisper.py
# coding=utf-8 # Copyright 2019 Facebook AI Research and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 XLM-RoBERTa model.""" from __future__ import annotations import math import warnings import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_xlm_roberta import XLMRobertaConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "FacebookAI/xlm-roberta-base" _CONFIG_FOR_DOC = "XLMRobertaConfig" XLM_ROBERTA_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports!
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`XLMRobertaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ XLM_ROBERTA_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->XLMRoberta class TFXLMRobertaEmbeddings(keras.layers.Layer): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config, **kwargs): super().__init__(**kwargs) self.padding_idx = 1 self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: tf.Tensor Returns: tf.Tensor """ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask return incremental_indices + self.padding_idx def call( self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False, ): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. 
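# Worked example (illustrative values): with padding_idx = 1 and input_ids [[0, 31414, 232, 2, 1, 1]],
# the mask is [[1, 1, 1, 1, 0, 0]], its cumsum is [[1, 2, 3, 4, 4, 4]], and the resulting
# position_ids are [[2, 3, 4, 5, 1, 1]]: real tokens count up from padding_idx + 1 while
# padding positions keep the value padding_idx.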
position_ids = self.create_position_ids_from_input_ids( input_ids=input_ids, past_key_values_length=past_key_values_length ) else: position_ids = tf.expand_dims( tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0 ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->XLMRoberta class TFXLMRobertaPooler(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(inputs=first_token_tensor) return pooled_output def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->XLMRoberta class TFXLMRobertaSelfAttention(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number " f"of attention heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.sqrt_att_head_size = math.sqrt(self.attention_head_size) self.query = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" ) self.key = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" ) self.value = keras.layers.Dense( units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" ) self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder self.config = config def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] return tf.transpose(tensor, perm=[0, 2, 1, 3]) def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> 
tuple[tf.Tensor]: batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(inputs=hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) key_layer = tf.concat([past_key_value[0], key_layer], axis=2) value_layer = tf.concat([past_key_value[1], value_layer], axis=2) else: key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch size, num_heads, seq_len_q, seq_len_k) attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) attention_scores = tf.divide(attention_scores, dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFXLMRobertaModel call() function) attention_scores = tf.add(attention_scores, attention_mask) # Normalize the attention scores to probabilities. attention_probs = stable_softmax(logits=attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(inputs=attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = tf.multiply(attention_probs, head_mask) attention_output = tf.matmul(attention_probs, value_layer) attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, all_head_size) attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->XLMRoberta class TFXLMRobertaSelfOutput(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->XLMRoberta class TFXLMRobertaAttention(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.self_attention = TFXLMRobertaSelfAttention(config, name="self") self.dense_output = TFXLMRobertaSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call( self, input_tensor: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor, encoder_attention_mask: tf.Tensor, past_key_value: tuple[tf.Tensor], output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: self_outputs = self.self_attention( hidden_states=input_tensor, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self.dense_output( hidden_states=self_outputs[0], input_tensor=input_tensor, training=training ) # add attentions (possibly with past_key_value) if we output them outputs = (attention_output,) + self_outputs[1:] return outputs 
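# Shape walk-through (hypothetical sizes): with batch_size=2, seq_len=5, hidden_size=768 and 12 heads,
# the query/key/value projections are (2, 5, 768), transpose_for_scores reshapes them to (2, 12, 5, 64),
# the attention score matrix is (2, 12, 5, 5), and the context is reshaped back to (2, 5, 768) before
# the residual connection and LayerNorm applied in TFXLMRobertaSelfOutput.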
def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->XLMRoberta class TFXLMRobertaIntermediate(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->XLMRoberta class TFXLMRobertaOutput(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) self.config = config def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.dropout(inputs=hidden_states, training=training) hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->XLMRoberta class TFXLMRobertaLayer(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.attention = TFXLMRobertaAttention(config, name="attention") self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TFXLMRobertaAttention(config, name="crossattention") self.intermediate = TFXLMRobertaIntermediate(config, name="intermediate") self.bert_output = TFXLMRobertaOutput(config, name="output") def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_value: tuple[tf.Tensor] | None, output_attentions: bool, training: bool = False, ) -> tuple[tf.Tensor]: # decoder uni-directional self-attention 
cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( input_tensor=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( input_tensor=attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, training=training, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value intermediate_output = self.intermediate(hidden_states=attention_output) layer_output = self.bert_output( hidden_states=intermediate_output, input_tensor=attention_output, training=training ) outputs = (layer_output,) + outputs # add attentions if we output them # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) if getattr(self, "crossattention", None) is not None: with tf.name_scope(self.crossattention.name): self.crossattention.build(None) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->XLMRoberta class TFXLMRobertaEncoder(keras.layers.Layer): def __init__(self, config: XLMRobertaConfig, **kwargs): super().__init__(**kwargs) self.config = config self.layer = [TFXLMRobertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, head_mask: tf.Tensor, encoder_hidden_states: tf.Tensor | None, encoder_attention_mask: tf.Tensor | None, past_key_values: tuple[tuple[tf.Tensor]] | None, use_cache: bool | None, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool = False, ) -> 
TFBaseModelOutputWithPastAndCrossAttentions | tuple[tf.Tensor]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) past_key_value = past_key_values[i] if past_key_values is not None else None layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, training=training, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) @keras_serializable # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaMainLayer with Roberta->XLMRoberta class TFXLMRobertaMainLayer(keras.layers.Layer): config_class = XLMRobertaConfig def __init__(self, config, add_pooling_layer=True, **kwargs): super().__init__(**kwargs) self.config = config self.is_decoder = config.is_decoder self.num_hidden_layers = config.num_hidden_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.encoder = TFXLMRobertaEncoder(config, name="encoder") self.pooler = TFXLMRobertaPooler(config, name="pooler") if add_pooling_layer else None # The embeddings must be the last declaration in order to follow the weights order self.embeddings = TFXLMRobertaEmbeddings(config, name="embeddings") # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings def get_input_embeddings(self) -> keras.layers.Layer: return self.embeddings # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings def set_input_embeddings(self, value: tf.Variable): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError @unpack_inputs # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool = False, ) -> TFBaseModelOutputWithPoolingAndCrossAttentions | tuple[tf.Tensor]: if not self.config.is_decoder: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape if past_key_values is None: past_key_values_length = 0 past_key_values = [None] * len(self.encoder.layer) else: past_key_values_length = shape_list(past_key_values[0][0])[-2] if attention_mask is None: attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, training=training, ) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
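# Illustrative example (made-up values): a padding mask [[1, 1, 0]] of shape (batch_size=1, seq_len=3)
# is reshaped below to (1, 1, 1, 3) and converted to the additive form [[[[0.0, 0.0, -10000.0]]]],
# so masked positions are pushed towards -inf before the softmax; for a decoder, the causal mask is
# multiplied in first so that future positions are masked in the same way.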
attention_mask_shape = shape_list(attention_mask) mask_seq_length = seq_length + past_key_values_length # Copied from `modeling_tf_t5.py` # Provided a padding mask of dimensions [batch_size, mask_seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] if self.is_decoder: seq_ids = tf.range(mask_seq_length) causal_mask = tf.less_equal( tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), seq_ids[None, :, None], ) causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) extended_attention_mask = causal_mask * attention_mask[:, None, :] attention_mask_shape = shape_list(extended_attention_mask) extended_attention_mask = tf.reshape( extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) ) if past_key_values[0] is not None: # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length]` extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] else: extended_attention_mask = tf.reshape( attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.is_decoder and encoder_attention_mask is not None: # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf.
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers encoder_outputs = self.encoder( hidden_states=embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None if not return_dict: return ( sequence_output, pooled_output, ) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "pooler", None) is not None: with tf.name_scope(self.pooler.name): self.pooler.build(None) if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaPreTrainedModel with Roberta->XLMRoberta class TFXLMRobertaPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = XLMRobertaConfig base_model_prefix = "roberta" @add_start_docstrings( "The bare XLM RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaModel(TFXLMRobertaPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, name="roberta") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, training: bool | None = False, ) -> tuple | TFBaseModelOutputWithPoolingAndCrossAttentions: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
Set to `False` during training, `True` during generation """ outputs = self.roberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->XLMRoberta class TFXLMRobertaLMHead(keras.layers.Layer): """XLMRoberta Head for masked language modeling.""" def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.act = get_tf_activation("gelu") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = input_embeddings def build(self, input_shape=None): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "layer_norm", None) is not None: with tf.name_scope(self.layer_norm.name): self.layer_norm.build([None, None, self.config.hidden_size]) def get_output_embeddings(self): return self.decoder def set_output_embeddings(self, value): self.decoder.weight = value self.decoder.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.layer_norm(hidden_states) # project back to size of vocabulary with bias seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states @add_start_docstrings("""XLM RoBERTa Model with a `language modeling` head on top.""", XLM_ROBERTA_START_DOCSTRING) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForMaskedLM(TFXLMRobertaPreTrainedModel, TFMaskedLanguageModelingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.lm_head = TFXLMRobertaLMHead(config, self.roberta.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, mask="<mask>", expected_output="' Paris'", expected_loss=0.1, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFMaskedLMOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) @add_start_docstrings( "XLM-RoBERTa Model with a `language modeling` head on top for CLM fine-tuning.", XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForCausalLM(TFXLMRobertaPreTrainedModel, TFCausalLanguageModelingLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] def __init__(self, config: XLMRobertaConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) if not config.is_decoder: logger.warning("If you want to use `TFXLMRobertaLMHeadModel` as a standalone, add `is_decoder=True.`") self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.lm_head = TFXLMRobertaLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head") def get_lm_head(self): return self.lm_head def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_head.name # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = tf.ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, past_key_values: tuple[tuple[np.ndarray | tf.Tensor]] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFCausalLMOutputWithCrossAttentions | tuple[tf.Tensor]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple[tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). Set to `False` during training, `True` during generation labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ outputs = self.roberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.lm_head(hidden_states=sequence_output, training=training) loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "lm_head", None) is not None: with tf.name_scope(self.lm_head.name): self.lm_head.build(None) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead with Roberta->XLMRoberta class TFXLMRobertaClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, features, training=False): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x, training=training) x = self.dense(x) x = self.dropout(x, training=training) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ XLM RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForSequenceClassification(TFXLMRobertaPreTrainedModel, TFSequenceClassificationLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.classifier = TFXLMRobertaClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="cardiffnlp/twitter-roberta-base-emotion", output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="'optimism'", expected_loss=0.08, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFSequenceClassifierOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ XLM Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForMultipleChoice(TFXLMRobertaPreTrainedModel, TFMultipleChoiceLoss): # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.roberta = TFXLMRobertaMainLayer(config, name="roberta") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFMultipleChoiceModelOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None outputs = self.roberta( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, training=training) logits = self.classifier(pooled_output) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ XLM RoBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForTokenClassification(TFXLMRobertaPreTrainedModel, TFTokenClassificationLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] _keys_to_ignore_on_load_missing = [r"dropout"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="ydshieh/roberta-large-ner-english", output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", expected_loss=0.01, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, labels: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFTokenClassifierOutput | tuple[tf.Tensor]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ XLM RoBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLM_ROBERTA_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering with Roberta->XLMRoberta, ROBERTA->XLM_ROBERTA class TFXLMRobertaForQuestionAnswering(TFXLMRobertaPreTrainedModel, TFQuestionAnsweringLoss): # names with a '.' 
represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.roberta = TFXLMRobertaMainLayer(config, add_pooling_layer=False, name="roberta") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(XLM_ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint="ydshieh/roberta-base-squad2", output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, expected_output="' puppet'", expected_loss=0.86, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: np.ndarray | tf.Tensor | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, start_positions: np.ndarray | tf.Tensor | None = None, end_positions: np.ndarray | tf.Tensor | None = None, training: bool | None = False, ) -> TFQuestionAnsweringModelOutput | tuple[tf.Tensor]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "roberta", None) is not None: with tf.name_scope(self.roberta.name): self.roberta.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size]) __all__ = [ "TFXLMRobertaForCausalLM", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", "TFXLMRobertaPreTrainedModel", ]
transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py/0
{ "file_path": "transformers/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py", "repo_id": "transformers", "token_count": 35114 }
552
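The MLM and causal-LM heads in the file above reuse the same `TFXLMRobertaMainLayer`. The snippet below is a minimal usage sketch for the masked-LM head, assuming the publicly hosted `FacebookAI/xlm-roberta-base` checkpoint (with TensorFlow weights) is available; the checkpoint name and sample sentence are illustrative, not taken from the module itself.

```python
import tensorflow as tf

from transformers import AutoTokenizer, TFXLMRobertaForMaskedLM

# Illustrative checkpoint; any XLM-RoBERTa checkpoint with TF weights should work.
checkpoint = "FacebookAI/xlm-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFXLMRobertaForMaskedLM.from_pretrained(checkpoint)

inputs = tokenizer("The capital of France is <mask>.", return_tensors="tf")
logits = model(**inputs).logits

# Locate the <mask> position and take the highest-scoring token there.
mask_position = int(tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_id]))  # expected to resemble " Paris", as in the docstring example
```

The causal-LM head (`TFXLMRobertaForCausalLM`) is exercised the same way, with `is_decoder=True` in the config and `use_cache=True` so that `past_key_values` can be fed back in during generation, as its docstring describes.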
# Copyright 2025 NXAI GmbH. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """xLSTM configuration.""" from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import is_xlstm_available, logging if is_xlstm_available(): from xlstm.xlstm_large.model import ( BackendModeType, ChunkwiseKernelType, DtypeType, SequenceKernelType, StepKernelType, WeightModeType, round_up_to_next_multiple_of, xLSTMLargeConfig, ) external_xlstm = True else: from typing import Literal BackendModeType = Literal["train", "train_with_padding", "inference"] ChunkwiseKernelType = Literal[ "chunkwise--native_autograd", "parallel--native_autograd", ] DtypeType = Literal["float32", "bfloat16", "float16"] SequenceKernelType = Literal["native_sequence__native"] StepKernelType = Literal["native"] WeightModeType = Literal["single", "fused"] def round_up_to_next_multiple_of(x: int, multiple_of: int) -> int: """Rounds up x to the next multiple of multiple_of.""" return int(((x + multiple_of - 1) // multiple_of) * multiple_of) external_xlstm = False logger = logging.get_logger(__name__) class xLSTMConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`xLSTM`]. It is used to instantiate a xLSTM model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the xLSTM-7b [NX-AI/xLSTM-7b](https://huggingface.co/NX-AI/xLSTM-7b) model. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (int, optional, *optional*, defaults to 50304): Vocabulary size of the xLSTM model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`xLSTMModel`]. Defaults to the GPT2-NeoX tokenizer size. hidden_size (int, optional, *optional*, defaults to 4096): Dimensionality of the embeddings or hidden states. embedding_dim (int, optional, *optional*, defaults to 4096): Dimensionality of the embeddings or hidden states, use hidde_size if None. num_hidden_layers (int, optional, *optional*, defaults to 32): Number of blocks of the xLSTM model. num_blocks (int, optional, *optional*, defaults to 32): Number of blocks of the xLSTM model, use num_hidden_layers if None. num_heads (int, optional, *optional*, defaults to 8): Number of heads for the xLSTM Layer/Cell. use_bias (bool, optional, *optional*, defaults to `False`): Whether to use biases in the xLSTM model. norm_reduction_force_float32 (bool, optional, *optional*, defaults to `True`): Whether to force the float32 norm reduction op to be done in fp32 precision. tie_word_embeddings (bool, optional, *optional*, defaults to `False`): Whether to tie word embeddings to the lm head weights. add_out_norm (bool, optional, *optional*, defaults to `True`): Whether to add an output norm after the blocks before the LMHead. 
norm_eps (float, optional, *optional*, defaults to 1e-06): Norm eps for RMSNorm and Layer Norm. qk_dim_factor (float, optional, *optional*, defaults to 0.5): Scale factor for the query and key dimension. v_dim_factor (float, optional, *optional*, defaults to 1.0): Scale factor for the value dimension. chunkwise_kernel (ChunkwiseKernelType, optional, *optional*, defaults to `"chunkwise--native_autograd"`): Kernel type for chunkwise processing mode. sequence_kernel (SequenceKernelType, optional, *optional*, defaults to `"native_sequence__native"`): Kernel type for sequence processing mode. step_kernel (StepKernelType, optional, *optional*, defaults to `"native"`): Kernel type for step processing mode. mode (BackendModeType, optional, *optional*, defaults to `"inference"`): Operation mode (inference is needed for generation). chunk_size (int, optional, *optional*, defaults to 64): Internal chunk size. return_last_states (bool, optional, *optional*, defaults to `True`): If to return the last states / cache internally. Needed as True for generation. autocast_kernel_dtype (DtypeType, optional, *optional*, defaults to `"bfloat16"`): Kernel dtype for the states. eps (float, optional, *optional*, defaults to 1e-06): Epsilon for the mLSTM cell post norm. inference_state_dtype (DtypeType, optional, *optional*, defaults to `"float32"`): Kernel dtype for states in inference. ffn_proj_factor (float, optional, *optional*, defaults to 2.667): Size factor of the post-up projection gated Feed Forward network. ffn_round_up_to_multiple_of (int, optional, *optional*, defaults to 64): Size factor round value of the post-up projection gated Feed Forward network. gate_soft_cap (float, optional, *optional*, defaults to 15.0): Gate soft cap scale. output_logit_soft_cap (float, optional, *optional*, defaults to 30.0): Output logit soft cap scale. weight_mode (`Literal`, *optional*, defaults to `"single"`): Whether parallel linear layers are separated or fused (single). use_cache (bool, optional, *optional*, defaults to `True`): Whether to use the cache (xLSTMCache). pad_token_id (int, optional, *optional*, defaults to 1): Pad token id needed for generation. bos_token_id (int, optional, *optional*, defaults to 0): BOS token id needed for generation. eos_token_id (int, optional, *optional*, defaults to 2): EOS token id needed for generation. max_inference_chunksize (int, optional, *optional*, defaults to 16384): Limit the chunk size for inference to save memory. 
Example: ```python >>> from transformers import xLSTMConfig, xLSTMModel >>> # Initializing a xLSTM configuration >>> configuration = xLSTMConfig() >>> # Initializing a model (with random weights) from the configuration >>> model = xLSTMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "xlstm" def __init__( self, vocab_size: int = 50304, hidden_size: int = 4096, embedding_dim: Optional[int] = None, num_hidden_layers: Optional[int] = 32, num_blocks: Optional[int] = None, num_heads: int = 8, use_bias: bool = False, norm_reduction_force_float32: bool = True, tie_word_embeddings: bool = False, add_out_norm: bool = True, norm_eps: float = 1e-6, # mlstm_layer qk_dim_factor: float = 0.5, v_dim_factor: float = 1.0, # mlstm backend chunkwise_kernel: ChunkwiseKernelType = "chunkwise--native_autograd", sequence_kernel: SequenceKernelType = "native_sequence__native", step_kernel: StepKernelType = "native", # nedded to enable generation mode: BackendModeType = "inference", chunk_size: int = 64, # needed to be true for generation return_last_states: bool = True, autocast_kernel_dtype: DtypeType = "bfloat16", eps: float = 1e-6, inference_state_dtype: DtypeType = "float32", # feedforward ffn_proj_factor: float = 2.667, ffn_round_up_to_multiple_of: int = 64, # capping gate_soft_cap: float = 15.0, output_logit_soft_cap: float = 30.0, # weights weight_mode: WeightModeType = "single", # HF interface use_cache: bool = True, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, max_inference_chunksize: int = 16384, **kwargs, ): self.vocab_size = vocab_size self.hidden_size = hidden_size if hidden_size is not None else embedding_dim self.embedding_dim = embedding_dim if embedding_dim is not None else hidden_size self.num_hidden_layers = num_hidden_layers if num_hidden_layers is not None else num_blocks self.num_blocks = num_blocks if num_blocks is not None else num_hidden_layers self.num_heads = num_heads self.use_bias = use_bias self.tie_word_embeddings = tie_word_embeddings self.add_out_norm = add_out_norm self.norm_eps = norm_eps self.norm_reduction_force_float32 = norm_reduction_force_float32 # mlstm_layer self.qk_dim_factor = qk_dim_factor self.v_dim_factor = v_dim_factor # mlstm backend self.chunkwise_kernel = chunkwise_kernel self.sequence_kernel = sequence_kernel self.step_kernel = step_kernel self.mode = mode self.chunk_size = chunk_size self.return_last_states = return_last_states self.autocast_kernel_dtype = autocast_kernel_dtype self.eps = eps self.inference_state_dtype = inference_state_dtype # feedforward self.ffn_proj_factor = ffn_proj_factor self.ffn_round_up_to_multiple_of = ffn_round_up_to_multiple_of # capping self.gate_soft_cap = gate_soft_cap self.output_logit_soft_cap = output_logit_soft_cap self.weight_mode = weight_mode self.use_cache = use_cache self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.max_inference_chunksize = max_inference_chunksize super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) @property def qk_dim(self): return round_up_to_next_multiple_of( self.hidden_size * self.qk_dim_factor, multiple_of=64, ) @property def v_dim(self): return round_up_to_next_multiple_of( self.hidden_size * self.v_dim_factor, multiple_of=64, ) @property def qk_head_dim(self): return self.qk_dim // self.num_heads @property def v_head_dim(self): return self.v_dim // 
self.num_heads def to_xlstm_block_config(self): if external_xlstm: return xLSTMLargeConfig( vocab_size=self.vocab_size, embedding_dim=self.hidden_size, num_blocks=self.num_hidden_layers, num_heads=self.num_heads, use_bias=self.use_bias, add_out_norm=self.add_out_norm, norm_eps=self.norm_eps, norm_reduction_force_float32=self.norm_reduction_force_float32, # mlstm_layer qk_dim_factor=self.qk_dim_factor, v_dim_factor=self.v_dim_factor, # mlstm backend chunkwise_kernel=self.chunkwise_kernel, sequence_kernel=self.sequence_kernel, step_kernel=self.step_kernel, mode=self.mode, chunk_size=self.chunk_size, return_last_states=self.return_last_states, autocast_kernel_dtype=self.autocast_kernel_dtype, eps=self.eps, inference_state_dtype=self.inference_state_dtype, # feedforward ffn_proj_factor=self.ffn_proj_factor, ffn_round_up_to_multiple_of=self.ffn_round_up_to_multiple_of, # capping gate_soft_cap=self.gate_soft_cap, output_logit_soft_cap=self.output_logit_soft_cap, weight_mode=self.weight_mode, ) else: return self __all__ = ["xLSTMConfig"]
transformers/src/transformers/models/xlstm/configuration_xlstm.py/0
{ "file_path": "transformers/src/transformers/models/xlstm/configuration_xlstm.py", "repo_id": "transformers", "token_count": 5505 }
553
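As a quick check of the derived-dimension properties in the configuration above, here is a hedged sketch (the numbers are simply the defaults, chosen for illustration) of how `qk_dim`/`v_dim` and the per-head sizes follow from the scale factors and `round_up_to_next_multiple_of`:

```python
from transformers import xLSTMConfig  # assumes the class is re-exported at the top level, as for other configs

config = xLSTMConfig(hidden_size=4096, num_heads=8, qk_dim_factor=0.5, v_dim_factor=1.0)

# qk_dim = round_up_to_next_multiple_of(4096 * 0.5, 64) = 2048, split over 8 heads -> 256 per head
# v_dim  = round_up_to_next_multiple_of(4096 * 1.0, 64) = 4096, split over 8 heads -> 512 per head
print(config.qk_dim, config.qk_head_dim)  # 2048 256
print(config.v_dim, config.v_head_dim)    # 4096 512
```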
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess import sys import warnings from argparse import ArgumentParser from pathlib import Path from packaging import version from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer from ..utils import logging from ..utils.import_utils import is_optimum_available from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import get_preprocessor MIN_OPTIMUM_VERSION = "1.5.0" ENCODER_DECODER_MODELS = ["vision-encoder-decoder"] def export_with_optimum(args): if is_optimum_available(): from optimum.version import __version__ as optimum_version parsed_optimum_version = version.parse(optimum_version) if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION): raise RuntimeError( f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You " "can upgrade optimum by running: pip install -U optimum[exporters]" ) else: raise RuntimeError( "transformers.onnx requires optimum to run, you can install the library by running: pip install " "optimum[exporters]" ) cmd_line = [ sys.executable, "-m", "optimum.exporters.onnx", f"--model {args.model}", f"--task {args.feature}", f"--framework {args.framework}" if args.framework is not None else "", f"{args.output}", ] proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE) proc.wait() logger.info( "The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as " "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: " "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model." ) def export_with_transformers(args): args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx") if not args.output.parent.exists(): args.output.parent.mkdir(parents=True) # Allocate the model model = FeaturesManager.get_model_from_feature( args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir ) model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature) onnx_config = model_onnx_config(model.config) if model_kind in ENCODER_DECODER_MODELS: encoder_model = model.get_encoder() decoder_model = model.get_decoder() encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config) decoder_onnx_config = onnx_config.get_decoder_config( encoder_model.config, decoder_model.config, feature=args.feature ) if args.opset is None: args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset) if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset): raise ValueError( f"Opset {args.opset} is not sufficient to export {model_kind}. At least " f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required." 
) preprocessor = AutoFeatureExtractor.from_pretrained(args.model) onnx_inputs, onnx_outputs = export( preprocessor, encoder_model, encoder_onnx_config, args.opset, args.output.parent.joinpath("encoder_model.onnx"), ) validate_model_outputs( encoder_onnx_config, preprocessor, encoder_model, args.output.parent.joinpath("encoder_model.onnx"), onnx_outputs, args.atol if args.atol else encoder_onnx_config.atol_for_validation, ) preprocessor = AutoTokenizer.from_pretrained(args.model) onnx_inputs, onnx_outputs = export( preprocessor, decoder_model, decoder_onnx_config, args.opset, args.output.parent.joinpath("decoder_model.onnx"), ) validate_model_outputs( decoder_onnx_config, preprocessor, decoder_model, args.output.parent.joinpath("decoder_model.onnx"), onnx_outputs, args.atol if args.atol else decoder_onnx_config.atol_for_validation, ) logger.info( f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()}," f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}" ) else: # Instantiate the appropriate preprocessor if args.preprocessor == "auto": preprocessor = get_preprocessor(args.model) elif args.preprocessor == "tokenizer": preprocessor = AutoTokenizer.from_pretrained(args.model) elif args.preprocessor == "image_processor": preprocessor = AutoImageProcessor.from_pretrained(args.model) elif args.preprocessor == "feature_extractor": preprocessor = AutoFeatureExtractor.from_pretrained(args.model) elif args.preprocessor == "processor": preprocessor = AutoProcessor.from_pretrained(args.model) else: raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") # Ensure the requested opset is sufficient if args.opset is None: args.opset = onnx_config.default_onnx_opset if args.opset < onnx_config.default_onnx_opset: raise ValueError( f"Opset {args.opset} is not sufficient to export {model_kind}. " f"At least {onnx_config.default_onnx_opset} is required." ) onnx_inputs, onnx_outputs = export( preprocessor, model, onnx_config, args.opset, args.output, ) if args.atol is None: args.atol = onnx_config.atol_for_validation validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) logger.info(f"All good, model saved at: {args.output.as_posix()}") warnings.warn( "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend" " using optimum.exporters.onnx in future. You can find more information here:" " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.", FutureWarning, ) def main(): parser = ArgumentParser("Hugging Face Transformers ONNX exporter") parser.add_argument( "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from." ) parser.add_argument( "--feature", default="default", help="The type of features to export the model with.", ) parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.") parser.add_argument( "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model." ) parser.add_argument( "--framework", type=str, choices=["pt", "tf"], default=None, help=( "The framework to use for the ONNX export." " If not provided, will attempt to use the local checkpoint's original framework" " or what is available in the environment." 
), ) parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.") parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.") parser.add_argument( "--preprocessor", type=str, choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"], default="auto", help="Which type of preprocessor to use. 'auto' tries to automatically detect it.", ) parser.add_argument( "--export_with_transformers", action="store_true", help=( "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be " "useful when exporting a model supported in transformers but not in optimum, otherwise it is not " "recommended." ), ) args = parser.parse_args() if args.export_with_transformers or not is_optimum_available(): export_with_transformers(args) else: export_with_optimum(args) if __name__ == "__main__": logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name logger.setLevel(logging.INFO) main()
transformers/src/transformers/onnx/__main__.py/0
{ "file_path": "transformers/src/transformers/onnx/__main__.py", "repo_id": "transformers", "token_count": 3988 }
554
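For completeness, the programmatic path that `export_with_transformers` wraps can also be driven directly. The following is a hedged sketch that mirrors the calls made in that function, using the same `FeaturesManager`/`export`/`validate_model_outputs` helpers; the checkpoint name and output path are placeholders, and note that this whole module is deprecated in favor of `optimum.exporters.onnx`.

```python
from pathlib import Path

from transformers import AutoTokenizer
from transformers.onnx import export, validate_model_outputs
from transformers.onnx.features import FeaturesManager

model_ckpt = "distilbert-base-uncased"  # placeholder checkpoint
feature = "default"

# Resolve the model and its ONNX config the same way the CLI does.
model = FeaturesManager.get_model_from_feature(feature, model_ckpt)
model_kind, onnx_config_cls = FeaturesManager.check_supported_model_or_raise(model, feature=feature)
onnx_config = onnx_config_cls(model.config)

tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
output_path = Path("onnx/model.onnx")
output_path.parent.mkdir(parents=True, exist_ok=True)

# Export with the config's default opset, then validate against the original model outputs.
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, output_path)
validate_model_outputs(onnx_config, tokenizer, model, output_path, onnx_outputs, onnx_config.atol_for_validation)
```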
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Union, overload import numpy as np from ..utils import ( ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES logger = logging.get_logger(__name__) # Copied from transformers.pipelines.text_classification.sigmoid def sigmoid(_outputs): return 1.0 / (1.0 + np.exp(-_outputs)) # Copied from transformers.pipelines.text_classification.softmax def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) # Copied from transformers.pipelines.text_classification.ClassificationFunction class ClassificationFunction(ExplicitEnum): SIGMOID = "sigmoid" SOFTMAX = "softmax" NONE = "none" @add_end_docstrings( build_pipeline_init_args(has_image_processor=True), r""" function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output.""", ) class ImageClassificationPipeline(Pipeline): """ Image classification pipeline using any `AutoModelForImageClassification`. This pipeline predicts the class of an image. Example: ```python >>> from transformers import pipeline >>> classifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k") >>> classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png") [{'score': 0.442, 'label': 'macaw'}, {'score': 0.088, 'label': 'popinjay'}, {'score': 0.075, 'label': 'parrot'}, {'score': 0.073, 'label': 'parodist, lampooner'}, {'score': 0.046, 'label': 'poll, poll_parrot'}] ``` Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"image-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=image-classification). 
""" function_to_apply: ClassificationFunction = ClassificationFunction.NONE _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) def _sanitize_parameters(self, top_k=None, function_to_apply=None, timeout=None): preprocess_params = {} if timeout is not None: preprocess_params["timeout"] = timeout postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k if isinstance(function_to_apply, str): function_to_apply = ClassificationFunction(function_to_apply.lower()) if function_to_apply is not None: postprocess_params["function_to_apply"] = function_to_apply return preprocess_params, {}, postprocess_params @overload def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> list[dict[str, Any]]: ... @overload def __call__(self, inputs: Union[list[str], list["Image.Image"]], **kwargs: Any) -> list[list[dict[str, Any]]]: ... def __call__( self, inputs: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]: """ Assign labels to the image(s) passed as inputs. Args: inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: If this argument is not specified, then it will apply the following functions according to the number of labels: - If the model has a single label, will apply the sigmoid function on the output. - If the model has several labels, will apply the softmax function on the output. Possible values are: - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. timeout (`float`, *optional*, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. Return: A dictionary or a list of dictionaries containing result. If the input is a single image, will return a dictionary, if the input is a list of several images, will return a list of dictionaries corresponding to the images. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. 
""" # After deprecation of this is completed, remove the default `None` value for `images` if "images" in kwargs: inputs = kwargs.pop("images") if inputs is None: raise ValueError("Cannot call the image-classification pipeline without an inputs argument!") return super().__call__(inputs, **kwargs) def preprocess(self, image, timeout=None): image = load_image(image, timeout=timeout) model_inputs = self.image_processor(images=image, return_tensors=self.framework) if self.framework == "pt": model_inputs = model_inputs.to(self.dtype) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, function_to_apply=None, top_k=5): if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: function_to_apply = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: function_to_apply = ClassificationFunction.SOFTMAX elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: function_to_apply = self.model.config.function_to_apply else: function_to_apply = ClassificationFunction.NONE if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels outputs = model_outputs["logits"][0] if self.framework == "pt" and outputs.dtype in (torch.bfloat16, torch.float16): outputs = outputs.to(torch.float32).numpy() else: outputs = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: scores = sigmoid(outputs) elif function_to_apply == ClassificationFunction.SOFTMAX: scores = softmax(outputs) elif function_to_apply == ClassificationFunction.NONE: scores = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") dict_scores = [ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) ] dict_scores.sort(key=lambda x: x["score"], reverse=True) if top_k is not None: dict_scores = dict_scores[:top_k] return dict_scores
transformers/src/transformers/pipelines/image_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/image_classification.py", "repo_id": "transformers", "token_count": 3973 }
555
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from io import BytesIO from typing import Any, Optional, Union, overload import requests from ..utils import ( add_end_docstrings, is_av_available, is_torch_available, logging, requires_backends, ) from .base import Pipeline, build_pipeline_init_args if is_av_available(): import av import numpy as np if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(build_pipeline_init_args(has_image_processor=True)) class VideoClassificationPipeline(Pipeline): """ Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a video. This video classification pipeline can currently be loaded from [`pipeline`] using the following task identifier: `"video-classification"`. See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=video-classification). """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "av") self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES) def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None, function_to_apply=None): preprocess_params = {} if frame_sampling_rate is not None: preprocess_params["frame_sampling_rate"] = frame_sampling_rate if num_frames is not None: preprocess_params["num_frames"] = num_frames postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k if function_to_apply is not None: if function_to_apply not in ["softmax", "sigmoid", "none"]: raise ValueError( f"Invalid value for `function_to_apply`: {function_to_apply}. " "Valid options are ['softmax', 'sigmoid', 'none']" ) postprocess_params["function_to_apply"] = function_to_apply else: postprocess_params["function_to_apply"] = "softmax" return preprocess_params, {}, postprocess_params @overload def __call__(self, inputs: str, **kwargs: Any) -> list[dict[str, Any]]: ... @overload def __call__(self, inputs: list[str], **kwargs: Any) -> list[list[dict[str, Any]]]: ... def __call__(self, inputs: Optional[Union[str, list[str]]] = None, **kwargs): """ Assign labels to the video(s) passed as inputs. Args: inputs (`str`, `list[str]`): The pipeline handles three types of videos: - A string containing a http link pointing to a video - A string containing a local path to a video The pipeline accepts either a single video or a batch of videos, which must then be passed as a string. Videos in a batch must all be in the same format: all as http links or all as local paths. top_k (`int`, *optional*, defaults to 5): The number of top labels that will be returned by the pipeline. 
If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels. num_frames (`int`, *optional*, defaults to `self.model.config.num_frames`): The number of frames sampled from the video to run the classification on. If not provided, will default to the number of frames specified in the model configuration. frame_sampling_rate (`int`, *optional*, defaults to 1): The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every frame will be used. function_to_apply(`str`, *optional*, defaults to "softmax"): The function to apply to the model output. By default, the pipeline will apply the softmax function to the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's built-in `None` will default to "softmax", so you need to pass the string "none" to disable any post-processing. Return: A list of dictionaries or a list of list of dictionaries containing result. If the input is a single video, will return a list of `top_k` dictionaries, if the input is a list of several videos, will return a list of list of `top_k` dictionaries corresponding to the videos. The dictionaries contain the following keys: - **label** (`str`) -- The label identified by the model. - **score** (`int`) -- The score attributed by the model for that label. """ # After deprecation of this is completed, remove the default `None` value for `images` if "videos" in kwargs: warnings.warn( "The `videos` argument has been renamed to `inputs`. In version 5 of Transformers, `videos` will no longer be accepted", FutureWarning, ) inputs = kwargs.pop("videos") if inputs is None: raise ValueError("Cannot call the video-classification pipeline without an inputs argument!") return super().__call__(inputs, **kwargs) def preprocess(self, video, num_frames=None, frame_sampling_rate=1): if num_frames is None: num_frames = self.model.config.num_frames if video.startswith("http://") or video.startswith("https://"): video = BytesIO(requests.get(video).content) container = av.open(video) start_idx = 0 end_idx = num_frames * frame_sampling_rate - 1 indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64) video = read_video_pyav(container, indices) video = list(video) model_inputs = self.image_processor(video, return_tensors=self.framework) if self.framework == "pt": model_inputs = model_inputs.to(self.dtype) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"): if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": if function_to_apply == "softmax": probs = model_outputs.logits[0].softmax(-1) elif function_to_apply == "sigmoid": probs = model_outputs.logits[0].sigmoid() else: probs = model_outputs.logits[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] def read_video_pyav(container, indices): frames = [] container.seek(0) start_index = indices[0] end_index = indices[-1] for i, frame in enumerate(container.decode(video=0)): if i > end_index: break if i >= start_index and i in indices: frames.append(frame) return np.stack([x.to_ndarray(format="rgb24") for x in frames])
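For context, a minimal sketch of how this pipeline is typically invoked follows; the checkpoint and video path are placeholder assumptions, and the call also assumes the `av` package is installed, as enforced by the constructor above.

```python
from transformers import pipeline

# Illustrative checkpoint; any video-classification model on the Hub would do.
video_classifier = pipeline(
    "video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics"
)

# `num_frames` falls back to the model config; `frame_sampling_rate` is the stride
# between sampled frames (1 keeps consecutive frames).
results = video_classifier(
    "path/to/local_video.mp4",  # placeholder path; an http(s) URL also works
    top_k=3,
    frame_sampling_rate=4,
)
print(results)  # [{"score": float, "label": str}, ...] sorted by score
```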
transformers/src/transformers/pipelines/video_classification.py/0
{ "file_path": "transformers/src/transformers/pipelines/video_classification.py", "repo_id": "transformers", "token_count": 3270 }
556
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib from functools import cached_property from typing import TYPE_CHECKING, Any, Optional, Union from packaging import version from .base import HfQuantizer from .quantizers_utils import get_module_from_name if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from ..utils import ( ACCELERATE_MIN_VERSION, is_accelerate_available, is_bitsandbytes_available, is_torch_available, is_torch_hpu_available, is_torch_npu_available, is_torch_xpu_available, logging, ) if is_torch_available(): import torch from ..pytorch_utils import Conv1D logger = logging.get_logger(__name__) class Bnb4BitHfQuantizer(HfQuantizer): """ 4-bit quantization from bitsandbytes.py quantization method: before loading: converts transformer layers into Linear4bit during loading: load 16bit weight and pass to the layer object after: quantizes individual weights in Linear4bit into 4bit at the first .cuda() call saving: from state dict, as usual; saves weights and `quant_state` components loading: need to locate `quant_state` components and pass to Param4bit constructor """ use_keep_in_fp32_modules = True requires_parameters_quantization = True requires_calibration = False required_packages = ["bitsandbytes", "accelerate"] def __init__(self, quantization_config, **kwargs): super().__init__(quantization_config, **kwargs) if self.quantization_config.llm_int8_skip_modules is not None: self.modules_to_not_convert = self.quantization_config.llm_int8_skip_modules def validate_environment(self, *args, **kwargs): if not is_accelerate_available(): raise ImportError( f"Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`" ) if not is_bitsandbytes_available(check_library_only=True): raise ImportError( "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`" ) if not is_torch_available(): raise ImportError( "The bitsandbytes library requires PyTorch but it was not found in your environment. " "You can install it with `pip install torch`." ) # `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time, # so those versions of the library are practically only available when CUDA is too. if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.1"): if not torch.cuda.is_available(): raise ImportError( "The installed version of bitsandbytes (<0.43.1) requires CUDA, but CUDA is not available. " "You may need to install PyTorch with CUDA support or upgrade bitsandbytes to >=0.43.1." 
) from ..integrations import validate_bnb_backend_availability from ..utils import is_bitsandbytes_multi_backend_available bnb_multibackend_is_enabled = is_bitsandbytes_multi_backend_available() validate_bnb_backend_availability(raise_exception=True) if kwargs.get("from_tf", False) or kwargs.get("from_flax", False): raise ValueError( "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make" " sure the weights are in PyTorch format." ) device_map = kwargs.get("device_map") if ( device_map is not None and isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload ): device_map_without_lm_head = { key: device_map[key] for key in device_map if key not in self.modules_to_not_convert } if set(device_map.values()) == {"cpu"} and bnb_multibackend_is_enabled: pass elif "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): raise ValueError( "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the " "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules " "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to " "`from_pretrained`. Check " "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu " "for more details. " ) def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"): from accelerate.utils import CustomDtype if target_dtype != torch.int8: logger.info("target_dtype {target_dtype} is replaced by `CustomDtype.INT4` for 4-bit BnB quantization") return CustomDtype.INT4 else: raise ValueError( "You are using `device_map='auto'` on a 4bit loaded version of the model. To automatically compute" " the appropriate device map, you should upgrade your `accelerate` library," "`pip install --upgrade accelerate` or install it from source to support fp4 auto device map" "calculation. You may encounter unexpected behavior, or pass your own device map" ) def check_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: dict[str, Any], **kwargs, ) -> bool: import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if isinstance(module._parameters.get(tensor_name, None), bnb.nn.Params4bit): # Add here check for loaded components' dtypes once serialization is implemented return True elif isinstance(module, bnb.nn.Linear4bit) and tensor_name == "bias": # bias could be loaded by regular set_module_tensor_to_device() from accelerate, # but it would wrongly use uninitialized weight there. 
return True else: return False def create_quantized_param( self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, target_device: "torch.device", state_dict: dict[str, Any], unexpected_keys: Optional[list[str]] = None, ): """ combines logic from _load_state_dict_into_meta_model and .integrations.bitsandbytes.py::set_module_quantized_tensor_to_device() """ import bitsandbytes as bnb module, tensor_name = get_module_from_name(model, param_name) if tensor_name not in module._parameters: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") old_value = getattr(module, tensor_name) # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if isinstance(target_device, int) and is_torch_npu_available(): target_device = f"npu:{target_device}" if tensor_name == "bias": if param_value is None: new_value = old_value.to(target_device) else: new_value = param_value.to(target_device) new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad) module._parameters[tensor_name] = new_value return if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit): raise ValueError("this function only loads `Linear4bit components`") if ( old_value.device == torch.device("meta") and target_device not in ["meta", torch.device("meta")] and param_value is None ): raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.") # construct `new_value` for the module._parameters[tensor_name]: if self.pre_quantized: # 4bit loading. Collecting components for restoring quantized weight # This can be expanded to make a universal call for any quantized weight loading if not self.is_serializable: raise ValueError( "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and ( param_name + ".quant_state.bitsandbytes__nf4" not in state_dict ): raise ValueError( f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components." ) quantized_stats = {} for k, v in state_dict.items(): if param_name + "." in k: quantized_stats[k] = v if unexpected_keys is not None and k in unexpected_keys: unexpected_keys.remove(k) param_kwargs = {} if self.is_bnb_supports_quant_storage_module: param_kwargs["module"] = module new_value = bnb.nn.Params4bit.from_prequantized( data=param_value, quantized_stats=quantized_stats, requires_grad=False, device=target_device, **param_kwargs, ) else: new_value = param_value.to("cpu") # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls, Conv1D): new_value = new_value.T kwargs = old_value.__dict__ new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device) module._parameters[tensor_name] = new_value # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.adjust_max_memory def adjust_max_memory(self, max_memory: dict[str, Union[int, str]]) -> dict[str, Union[int, str]]: # need more space for buffers that are created during quantization max_memory = {key: val * 0.90 for key, val in max_memory.items()} return max_memory # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_dtype def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype": if dtype is None: # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` logger.info( "Overriding dtype=%s with `dtype=torch.float16` due to " "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. " "Pass your own dtype to specify the dtype of the remaining non-linear layers or pass" " dtype=torch.float16 to remove this warning.", dtype, ) dtype = torch.float16 return dtype def update_device_map(self, device_map): if device_map is None: if torch.cuda.is_available(): device_map = {"": torch.cuda.current_device()} elif is_torch_npu_available(): device_map = {"": f"npu:{torch.npu.current_device()}"} elif is_torch_hpu_available(): device_map = {"": f"hpu:{torch.hpu.current_device()}"} elif is_torch_xpu_available(): device_map = {"": torch.xpu.current_device()} else: device_map = {"": "cpu"} logger.info( "The device_map was not initialized. " f"Setting device_map to {device_map}. " "If you want to use the model for inference, please set device_map ='auto' " ) return device_map # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_before_weight_loading def _process_model_before_weight_loading( self, model: "PreTrainedModel", device_map, keep_in_fp32_modules: Optional[list[str]] = None, **kwargs, ): from ..integrations import replace_with_bnb_linear llm_int8_enable_fp32_cpu_offload = self.quantization_config.llm_int8_enable_fp32_cpu_offload self.modules_to_not_convert = self.get_modules_to_not_convert( model, self.quantization_config.llm_int8_skip_modules, keep_in_fp32_modules ) # Extend `self.modules_to_not_convert` to keys that are supposed to be offloaded to `cpu` or `disk` if isinstance(device_map, dict) and len(device_map.keys()) > 1: keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] if len(keys_on_cpu) > 0 and not llm_int8_enable_fp32_cpu_offload: raise ValueError( "If you want to offload some keys to `cpu` or `disk`, you need to set " "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be " " converted to 8-bit but kept in 32-bit." 
) self.modules_to_not_convert.extend(keys_on_cpu) model = replace_with_bnb_linear( model, modules_to_not_convert=self.modules_to_not_convert, quantization_config=self.quantization_config ) # TODO: consider bringing replace_with_bnb_linear() code from ..integrations/bitsandbyter.py to here model.config.quantization_config = self.quantization_config # Copied from transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer._process_model_after_weight_loading with 8bit->4bit def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs): model.is_loaded_in_4bit = True model.is_4bit_serializable = self.is_serializable() return model def is_serializable(self, safe_serialization=None): _is_4bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.41.3") if not _is_4bit_serializable: logger.warning( "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. " "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed." ) return False return True @cached_property def is_bnb_supports_quant_storage_module(self) -> bool: """ determines if the current version of bitsandbytes supports the `module` parameter in `Params4bit.from_prequantized` :return: """ return version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.43.3") @property def is_trainable(self) -> bool: return True def _dequantize(self, model): from ..integrations import dequantize_and_replace model = dequantize_and_replace( model, self.modules_to_not_convert, quantization_config=self.quantization_config ) return model
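The quantizer above is normally exercised indirectly through `from_pretrained`. A minimal sketch of that path, assuming a CUDA-capable machine with `bitsandbytes` and `accelerate` installed and using an illustrative checkpoint:

```python
import torch

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit NF4 quantization with bfloat16 compute: the configuration that this
# quantizer validates and applies while the weights are being loaded.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint
    quantization_config=bnb_config,
    device_map="auto",  # requires `accelerate`
)
print(model.is_loaded_in_4bit)  # flag set by _process_model_after_weight_loading
```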
transformers/src/transformers/quantizers/quantizer_bnb_4bit.py/0
{ "file_path": "transformers/src/transformers/quantizers/quantizer_bnb_4bit.py", "repo_id": "transformers", "token_count": 7087 }
557
# Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch-independent utilities for the Trainer class. """ import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, NamedTuple, Optional, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_hpu_available, is_torch_mlu_available, is_torch_mps_available, is_torch_musa_available, is_torch_npu_available, is_torch_xla_available, is_torch_xpu_available, requires_backends, ) if is_torch_available(): import torch def seed_worker(worker_id: int, num_workers: int, rank: int): """ Helper function to set worker seed during Dataloader initialization. """ init_seed = torch.initial_seed() % 2**32 worker_seed = num_workers * rank + init_seed set_seed(worker_seed) def enable_full_determinism(seed: int, warn_only: bool = False): """ Helper function for reproducible behavior during distributed training. See - https://pytorch.org/docs/stable/notes/randomness.html for pytorch - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow """ # set seed first set_seed(seed) if is_torch_available(): # Enable PyTorch deterministic mode. This potentially requires either the environment # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, # depending on the CUDA version, so we set them both here os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" # The environment variable required to enable deterministic mode on Ascend NPUs. os.environ["ASCEND_LAUNCH_BLOCKING"] = "1" os.environ["HCCL_DETERMINISTIC"] = "1" os.environ["FLASH_ATTENTION_DETERMINISTIC"] = "1" torch.use_deterministic_algorithms(True, warn_only=warn_only) # Enable CUDNN deterministic mode torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if is_tf_available(): import tensorflow as tf tf.config.experimental.enable_op_determinism() def set_seed(seed: int, deterministic: bool = False): """ Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed). Args: seed (`int`): The seed to set. deterministic (`bool`, *optional*, defaults to `False`): Whether to use deterministic algorithms where available. Can slow down training. 
""" random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) # ^^ safe to call this function even if cuda is not available if deterministic: torch.use_deterministic_algorithms(True) if is_torch_mlu_available(): torch.mlu.manual_seed_all(seed) if is_torch_musa_available(): torch.musa.manual_seed_all(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) if is_torch_hpu_available(): torch.hpu.manual_seed_all(seed) if is_torch_xpu_available(): torch.xpu.manual_seed_all(seed) if is_tf_available(): import tensorflow as tf tf.random.set_seed(seed) if deterministic: tf.config.experimental.enable_op_determinism() def neftune_post_forward_hook(module, input, output): """ Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding layers. This method is slightly adapted from the original source code that can be found here: https://github.com/neelsjain/NEFTune Simply add it to your model as follows: ```python model = ... model.embed_tokens.neftune_noise_alpha = 0.1 model.embed_tokens.register_forward_hook(neftune_post_forward_hook) ``` Args: module (`torch.nn.Module`): The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to the desired noise alpha value. input (`torch.Tensor`): The input tensor to the model. output (`torch.Tensor`): The output tensor of the model (i.e. the embeddings). """ if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output class EvalPrediction: """ Evaluation output (always contains labels), to be used to compute metrics. Parameters: predictions (`np.ndarray`): Predictions of the model. label_ids (`np.ndarray`): Targets to be matched. inputs (`np.ndarray`, *optional*): Input data passed to the model. losses (`np.ndarray`, *optional*): Loss values computed during evaluation. 
""" def __init__( self, predictions: Union[np.ndarray, tuple[np.ndarray]], label_ids: Union[np.ndarray, tuple[np.ndarray]], inputs: Optional[Union[np.ndarray, tuple[np.ndarray]]] = None, losses: Optional[Union[np.ndarray, tuple[np.ndarray]]] = None, ): self.predictions = predictions self.label_ids = label_ids self.inputs = inputs self.losses = losses self.elements = (self.predictions, self.label_ids) if self.inputs is not None: self.elements += (self.inputs,) if self.losses is not None: self.elements += (self.losses,) def __iter__(self): return iter(self.elements) def __getitem__(self, idx): if idx < 0 or idx >= len(self.elements): raise IndexError("tuple index out of range") return self.elements[idx] class EvalLoopOutput(NamedTuple): predictions: Union[np.ndarray, tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, tuple[np.ndarray]]] metrics: Optional[dict[str, float]] num_samples: Optional[int] class PredictionOutput(NamedTuple): predictions: Union[np.ndarray, tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, tuple[np.ndarray]]] metrics: Optional[dict[str, float]] class TrainOutput(NamedTuple): global_step: int training_loss: float metrics: dict[str, float] PREFIX_CHECKPOINT_DIR = "checkpoint" _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) class IntervalStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class SaveStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" BEST = "best" class EvaluationStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class HubStrategy(ExplicitEnum): END = "end" EVERY_SAVE = "every_save" CHECKPOINT = "checkpoint" ALL_CHECKPOINTS = "all_checkpoints" class BestRun(NamedTuple): """ The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]). Parameters: run_id (`str`): The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}). objective (`float`): The objective that was obtained for this run. hyperparameters (`dict[str, Any]`): The hyperparameters picked to get this run. run_summary (`Optional[Any]`): A summary of tuning experiments. `ray.tune.ExperimentAnalysis` object for Ray backend. """ run_id: str objective: Union[float, list[float]] hyperparameters: dict[str, Any] run_summary: Optional[Any] = None def default_compute_objective(metrics: dict[str, float]) -> float: """ The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no metrics are provided to the [`Trainer`], the sum of all metrics otherwise. Args: metrics (`dict[str, float]`): The metrics returned by the evaluate method. 
Return: `float`: The objective to minimize or maximize """ metrics = copy.deepcopy(metrics) loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) # Remove speed metrics speed_metrics = [ m for m in metrics if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time") ] for sm in speed_metrics: _ = metrics.pop(sm, None) return loss if len(metrics) == 0 else sum(metrics.values()) def default_hp_space_optuna(trial) -> dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), } def default_hp_space_ray(trial) -> dict[str, float]: from .integrations import is_ray_tune_available assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), } def default_hp_space_sigopt(trial): return [ {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"}, {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, { "categorical_values": ["4", "8", "16", "32", "64"], "name": "per_device_train_batch_size", "type": "categorical", }, ] def default_hp_space_wandb(trial) -> dict[str, float]: from .integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") return { "method": "random", "metric": {"name": "objective", "goal": "minimize"}, "parameters": { "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, }, } class HPSearchBackend(ExplicitEnum): OPTUNA = "optuna" RAY = "ray" SIGOPT = "sigopt" WANDB = "wandb" def is_main_process(local_rank): """ Whether or not the current process is the local process, based on `xr.global_ordinal()` (for TPUs) first, then on `local_rank`. """ if is_torch_xla_available(): import torch_xla.runtime as xr return xr.global_ordinal() == 0 return local_rank in [-1, 0] def total_processes_number(local_rank): """ Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs. """ if is_torch_xla_available(): import torch_xla.runtime as xr return xr.world_size() elif local_rank != -1 and is_torch_available(): import torch return torch.distributed.get_world_size() return 1 def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None): """ Measure and return speed performance metrics. This function requires a time snapshot `start_time` before the operation to be measured starts and this function should be run immediately after the operation to be measured has completed. Args: - split: name to prefix metric (like train, eval, test...) 
- start_time: operation start time - num_samples: number of samples processed - num_steps: number of steps processed - num_tokens: number of tokens processed """ runtime = time.time() - start_time result = {f"{split}_runtime": round(runtime, 4)} if runtime == 0: return result if num_samples is not None: samples_per_second = num_samples / runtime result[f"{split}_samples_per_second"] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f"{split}_steps_per_second"] = round(steps_per_second, 3) if num_tokens is not None: tokens_per_second = num_tokens / runtime result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3) return result class SchedulerType(ExplicitEnum): """ Scheduler names for the parameter `lr_scheduler_type` in [`TrainingArguments`]. By default, it uses "linear". Internally, this retrieves `get_linear_schedule_with_warmup` scheduler from [`Trainer`]. Scheduler types: - "linear" = get_linear_schedule_with_warmup - "cosine" = get_cosine_schedule_with_warmup - "cosine_with_restarts" = get_cosine_with_hard_restarts_schedule_with_warmup - "polynomial" = get_polynomial_decay_schedule_with_warmup - "constant" = get_constant_schedule - "constant_with_warmup" = get_constant_schedule_with_warmup - "inverse_sqrt" = get_inverse_sqrt_schedule - "reduce_lr_on_plateau" = get_reduce_on_plateau_schedule - "cosine_with_min_lr" = get_cosine_with_min_lr_schedule_with_warmup - "warmup_stable_decay" = get_wsd_schedule """ LINEAR = "linear" COSINE = "cosine" COSINE_WITH_RESTARTS = "cosine_with_restarts" POLYNOMIAL = "polynomial" CONSTANT = "constant" CONSTANT_WITH_WARMUP = "constant_with_warmup" INVERSE_SQRT = "inverse_sqrt" REDUCE_ON_PLATEAU = "reduce_lr_on_plateau" COSINE_WITH_MIN_LR = "cosine_with_min_lr" COSINE_WARMUP_WITH_MIN_LR = "cosine_warmup_with_min_lr" WARMUP_STABLE_DECAY = "warmup_stable_decay" class TrainerMemoryTracker: """ A helper class that tracks cpu and gpu memory. This class will silently skip unless `psutil` is available. Install with `pip install psutil`. When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage. Example : ```python self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() # code ... metrics = {"train_runtime": 10.5} self._memory_tracker.stop_and_update_metrics(metrics) ``` At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`. To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`]. 
""" # map trainer methods to metrics prefix stages = { "__init__": "init", "train": "train", "_inner_training_loop": "train", "evaluate": "eval", "predict": "test", } def __init__(self, skip_memory_metrics=False): self.skip_memory_metrics = skip_memory_metrics if not is_psutil_available(): # soft dependency on psutil self.skip_memory_metrics = True if self.skip_memory_metrics: return import psutil # noqa if is_torch_cuda_available() or is_torch_mlu_available() or is_torch_musa_available(): import torch self.torch = torch self.gpu = {} elif is_torch_mps_available(): import torch self.torch = torch self.gpu = {} elif is_torch_xpu_available(): import torch self.torch = torch self.gpu = {} elif is_torch_npu_available(): import torch self.torch = torch self.gpu = {} elif is_torch_hpu_available(): import torch self.torch = torch self.gpu = {} else: self.torch = None self.process = psutil.Process() self.cur_stage = None self.cpu = {} self.init_reported = False def derive_stage(self): """derives the stage/caller name automatically""" caller = inspect.currentframe().f_back.f_back.f_code.co_name if caller in self.stages: return self.stages[caller] else: raise ValueError( f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" ) def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_mem_used_peak = -1 while True: self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def start(self): """start tracking for the caller's stage""" if self.skip_memory_metrics: return stage = self.derive_stage() # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return self.cur_stage = stage gc.collect() if self.torch is not None: if torch.cuda.is_available(): self.torch.cuda.reset_peak_memory_stats() self.torch.cuda.empty_cache() elif is_torch_mlu_available(): self.torch.mlu.reset_peak_memory_stats() self.torch.mlu.empty_cache() elif is_torch_musa_available(): self.torch.musa.reset_peak_memory_stats() self.torch.musa.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.reset_peak_memory_stats() self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.reset_peak_memory_stats() self.torch.npu.empty_cache() elif is_torch_hpu_available(): self.torch.hpu.reset_peak_memory_stats() # not available on hpu as it reserves all device memory for the current process # self.torch.hpu.empty_cache() elif is_torch_mps_available(): self.torch.mps.empty_cache() # gpu if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() elif is_torch_mlu_available(): self.gpu_mem_used_at_start = self.torch.mlu.memory_allocated() elif is_torch_musa_available(): self.gpu_mem_used_at_start = self.torch.musa.memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() elif is_torch_hpu_available(): self.gpu_mem_used_at_start = self.torch.hpu.memory_allocated() elif is_torch_mps_available(): self.gpu_mem_used_at_start = self.torch.mps.current_allocated_memory() # cpu self.cpu_mem_used_at_start = self.cpu_mem_used() self.peak_monitoring = True 
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() def stop(self, stage): """stop tracking for the passed stage""" # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # this sends a signal to peak_monitor_func to complete its loop self.peak_monitoring = False # first ensure all objects get collected and their memory is freed gc.collect() if self.torch is not None: if torch.cuda.is_available(): self.torch.cuda.empty_cache() elif is_torch_mlu_available(): self.torch.mlu.empty_cache() elif is_torch_musa_available(): self.torch.musa.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.empty_cache() elif is_torch_hpu_available(): # not available on hpu as it reserves all device memory for the current process # self.torch.npu.empty_cache() pass elif is_torch_mps_available(): self.torch.mps.empty_cache() # concepts: # - alloc_delta: the difference of allocated memory between the end and the start # - peaked_delta: the difference between the peak memory and the current memory # in order to know how much memory the measured code consumed one needs to sum these two # gpu if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_now = self.torch.cuda.memory_allocated() self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() elif is_torch_mlu_available(): self.gpu_mem_used_now = self.torch.mlu.memory_allocated() self.gpu_mem_used_peak = self.torch.mlu.max_memory_allocated() elif is_torch_musa_available(): self.gpu_mem_used_now = self.torch.musa.memory_allocated() self.gpu_mem_used_peak = self.torch.musa.max_memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_now = self.torch.xpu.memory_allocated() self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_now = self.torch.npu.memory_allocated() self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() elif is_torch_hpu_available(): self.gpu_mem_used_now = self.torch.hpu.memory_allocated() self.gpu_mem_used_peak = self.torch.hpu.max_memory_allocated() elif is_torch_mps_available(): self.gpu_mem_used_now = self.torch.mps.current_allocated_memory() # self.torch.mps.max_memory_allocated() does not exist yet self.gpu_mem_used_peak = None else: raise ValueError("No available GPU device found!") self.gpu[self.cur_stage] = { "begin": self.gpu_mem_used_at_start, "end": self.gpu_mem_used_now, "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), } if self.gpu_mem_used_peak is not None: self.gpu[self.cur_stage]["peaked"] = max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now) else: self.gpu[self.cur_stage]["peaked"] = "Not available" # cpu self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = { "begin": self.cpu_mem_used_at_start, "end": self.cpu_mem_used_now, "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), } # reset - cycle finished self.cur_stage = None def update_metrics(self, stage, metrics): """updates the metrics""" if self.skip_memory_metrics: return # deal with nested calls of eval during train - simply ignore those if self.cur_stage is not None and self.cur_stage != stage: return # since we don't have a way to return init metrics, we push them into the first of train/val/predict stages = [stage] if not self.init_reported: 
stages.insert(0, "init") self.init_reported = True for stage in stages: for t in ["alloc", "peaked"]: if stage in self.cpu and t in self.cpu[stage]: metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] # if we need additional debug info, enable the following # for t in ["begin", "end"]: # if stage in self.cpu and t in self.cpu[stage]: # metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t] # if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: # metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t] # since memory can be allocated before init, and it might be difficult to track overall # memory usage, in particular for GPU, let's report memory usage at the point init was called if stages[0] == "init": metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] if self.torch is not None: metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] # if we also wanted to report any additional memory allocations in between init and # whatever the next stage was we could also report this: # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]: # metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"] # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]: # metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"] def stop_and_update_metrics(self, metrics=None): """combine stop and metrics update in one call for simpler code""" if self.skip_memory_metrics: return stage = self.derive_stage() self.stop(stage) # init doesn't have metrics to update so we just save that data for later stages to retrieve if metrics is not None: self.update_metrics(stage, metrics) def has_length(dataset): """ Checks if the dataset implements __len__() and it doesn't raise an error """ try: return len(dataset) is not None except TypeError: # TypeError: len() of unsized object return False except AttributeError: # Ray DataSets raises an AttributeError: https://github.com/ray-project/ray/blob/master/python/ray/data/dataset.py#L5616 return False def denumpify_detensorize(metrics): """ Recursively calls `.item()` on the element of the dictionary passed """ if isinstance(metrics, (list, tuple)): return type(metrics)(denumpify_detensorize(m) for m in metrics) elif isinstance(metrics, dict): return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) elif isinstance(metrics, np.generic): return metrics.item() elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: return metrics.item() return metrics def number_of_arguments(func): """ Return the number of arguments of the passed function, even if it's a partial function. """ if isinstance(func, functools.partial): total_args = len(inspect.signature(func.func).parameters) return total_args - len(func.args) - len(func.keywords) return len(inspect.signature(func).parameters) def find_executable_batch_size( function: Optional[callable] = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False ): """ Args: A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is multiplied by 0.9 and passed to `function`. `function` must take in a `batch_size` parameter as its first argument. 
function (`callable`, *optional*) A function to wrap starting_batch_size (`int`, *optional*) The batch size to try and fit into memory auto_find_batch_size (`bool`, *optional*) If False, will just execute `function` """ if function is None: return functools.partial( find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size, ) if auto_find_batch_size: requires_backends(find_executable_batch_size, "accelerate") from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) return functools.partial(function, batch_size=starting_batch_size) class FSDPOption(ExplicitEnum): FULL_SHARD = "full_shard" SHARD_GRAD_OP = "shard_grad_op" NO_SHARD = "no_shard" HYBRID_SHARD = "hybrid_shard" HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" OFFLOAD = "offload" AUTO_WRAP = "auto_wrap" class RemoveColumnsCollator: """Wrap the data collator to remove unused columns before they are passed to the collator.""" def __init__( self, data_collator, signature_columns, logger=None, model_name: Optional[str] = None, description: Optional[str] = None, ): self.data_collator = data_collator self.signature_columns = signature_columns self.logger = logger self.description = description self.model_name = model_name self.message_logged = False def _remove_columns(self, feature: dict) -> dict: if not isinstance(feature, dict): return feature if not self.message_logged and self.logger and self.model_name: ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) if len(ignored_columns) > 0: dset_description = "" if self.description is None else f"in the {self.description} set" self.logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " " you can safely ignore this message." ) self.message_logged = True return {k: v for k, v in feature.items() if k in self.signature_columns} def __call__(self, features: list[dict]): features = [self._remove_columns(feature) for feature in features] return self.data_collator(features) def check_target_module_exists(optim_target_modules, key: str, return_is_regex: bool = False): """A helper method to check if the passed module's key name matches any of the target modules in the optim_target_modules. Args: optim_target_modules (`Union[str, list[str]]`): A list of strings to try to match. Can be also a full string. key (`str`): A key to search any matches in optim_target_modules return_is_regex (`bool`): If set to `True`, the method will return whether the passed `optim_target_modules` is a regex or not. Returns: `bool` : True of match object if key matches any target modules from config, False or None if no match found `bool` : If the matched target module is a regex to silence out the warnings in Trainer for extra modules being found (only if `target_module_found=True` for an array of regex). 
""" target_module_found = False is_regex = False if isinstance(optim_target_modules, str): target_module_found = bool(re.fullmatch(optim_target_modules, key)) is_regex = optim_target_modules != key elif key in optim_target_modules: # from here, target_module_found must be a list of str # this module is specified directly in target_modules target_module_found = True elif any(target_key in key for target_key in optim_target_modules): target_module_found = True elif any(bool(re.fullmatch(optim_target_module, key)) for optim_target_module in optim_target_modules): target_module_found = True is_regex = True if return_is_regex: return target_module_found, is_regex return target_module_found
transformers/src/transformers/trainer_utils.py/0
{ "file_path": "transformers/src/transformers/trainer_utils.py", "repo_id": "transformers", "token_count": 14617 }
558
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class MistralCommonTokenizer(metaclass=DummyObject): _backends = ["mistral-common"] def __init__(self, *args, **kwargs): requires_backends(self, ["mistral-common"])
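The placeholder class behaves roughly as sketched below when `mistral-common` is not installed (an assumption of this snippet); with the backend present, the real tokenizer is importable instead.

```python
from transformers.utils.dummy_mistral_common_objects import MistralCommonTokenizer

try:
    MistralCommonTokenizer()
except ImportError as err:
    # `requires_backends` raises an ImportError pointing at the missing
    # optional dependency and how to install it.
    print(err)
```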
transformers/src/transformers/utils/dummy_mistral_common_objects.py/0
{ "file_path": "transformers/src/transformers/utils/dummy_mistral_common_objects.py", "repo_id": "transformers", "token_count": 107 }
559
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Hub utilities: utilities related to download and cache models """ import json import os import re import sys import tempfile import warnings from concurrent import futures from pathlib import Path from typing import Optional, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( _CACHED_NO_EXIST, CommitOperationAdd, ModelCard, ModelCardData, constants, create_branch, create_commit, create_repo, hf_hub_download, hf_hub_url, list_repo_tree, snapshot_download, try_to_load_from_cache, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, GatedRepoError, HfHubHTTPError, LocalEntryNotFoundError, OfflineModeIsEnabled, RepositoryNotFoundError, RevisionNotFoundError, build_hf_headers, get_session, hf_raise_for_status, send_telemetry, ) from requests.exceptions import HTTPError from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) LEGACY_PROCESSOR_CHAT_TEMPLATE_FILE = "chat_template.json" CHAT_TEMPLATE_FILE = "chat_template.jinja" CHAT_TEMPLATE_DIR = "additional_chat_templates" logger = logging.get_logger(__name__) # pylint: disable=invalid-name _is_offline_mode = huggingface_hub.constants.HF_HUB_OFFLINE def is_offline_mode(): return _is_offline_mode torch_cache_home = os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) default_cache_path = constants.default_cache_path # Determine default cache directory. Lots of legacy environment variables to ensure backward compatibility. # The best way to set the cache path is with the environment variable HF_HOME. For more details, check out this # documentation page: https://huggingface.co/docs/huggingface_hub/package_reference/environment_variables. # # In code, use `HF_HUB_CACHE` as the default cache path. This variable is set by the library and is guaranteed # to be set to the right value. # # TODO: clean this for v5? PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", constants.HF_HUB_CACHE) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(constants.HF_HOME, "modules")) TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" SESSION_ID = uuid4().hex # Add deprecation warning for old environment variables. for key in ("PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "TRANSFORMERS_CACHE"): if os.getenv(key) is not None: warnings.warn( f"Using `{key}` is deprecated and will be removed in v5 of Transformers. 
Use `HF_HOME` instead.", FutureWarning, ) S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" _staging_mode = os.environ.get("HUGGINGFACE_CO_STAGING", "NO").upper() in ENV_VARS_TRUE_VALUES _default_endpoint = "https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co" HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT) HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" def _get_cache_file_to_return( path_or_repo_id: str, full_filename: str, cache_dir: Union[str, Path, None] = None, revision: Optional[str] = None, repo_type: Optional[str] = None, ): # We try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache( path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision, repo_type=repo_type ) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file return None def list_repo_templates( repo_id: str, *, local_files_only: bool, revision: Optional[str] = None, cache_dir: Optional[str] = None, ) -> list[str]: """List template files from a repo. A template is a jinja file located under the `additional_chat_templates/` folder. If working in offline mode or if internet is down, the method will list jinja template from the local cache - if any. """ if not local_files_only: try: return [ entry.path.removeprefix(f"{CHAT_TEMPLATE_DIR}/") for entry in list_repo_tree( repo_id=repo_id, revision=revision, path_in_repo=CHAT_TEMPLATE_DIR, recursive=False, ) if entry.path.endswith(".jinja") ] except (GatedRepoError, RepositoryNotFoundError, RevisionNotFoundError): raise # valid errors => do not catch except (HTTPError, requests.exceptions.ConnectionError): pass # offline mode, internet down, etc. 
=> try local files # check local files try: snapshot_dir = snapshot_download( repo_id=repo_id, revision=revision, cache_dir=cache_dir, local_files_only=True ) except LocalEntryNotFoundError: # No local repo means no local files return [] templates_dir = Path(snapshot_dir, CHAT_TEMPLATE_DIR) if not templates_dir.is_dir(): return [] return [entry.stem for entry in templates_dir.iterdir() if entry.is_file() and entry.name.endswith(".jinja")] def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def define_sagemaker_information(): try: instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json() dlc_container_used = instance_data["Image"] dlc_tag = instance_data["Image"].split(":")[1] except Exception: dlc_container_used = None dlc_tag = None sagemaker_params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}")) runs_distributed_training = "sagemaker_distributed_dataparallel_enabled" in sagemaker_params account_id = os.getenv("TRAINING_JOB_ARN").split(":")[4] if "TRAINING_JOB_ARN" in os.environ else None sagemaker_object = { "sm_framework": os.getenv("SM_FRAMEWORK_MODULE", None), "sm_region": os.getenv("AWS_REGION", None), "sm_number_gpu": os.getenv("SM_NUM_GPUS", "0"), "sm_number_cpu": os.getenv("SM_NUM_CPUS", "0"), "sm_distributed_training": runs_distributed_training, "sm_deep_learning_container": dlc_container_used, "sm_deep_learning_container_tag": dlc_tag, "sm_account_id": account_id, } return sagemaker_object def http_user_agent(user_agent: Union[dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if constants.HF_HUB_DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]) -> Optional[str]: """ Extracts the commit hash from a resolved filename toward a cache file. """ if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, **kwargs, ) -> Optional[str]: """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. repo_type (`str`, *optional*): Specify the repo type (useful when downloading from a space for instance). <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("google-bert/bert-base-uncased", "pytorch_model.bin") ``` """ file = cached_files(path_or_repo_id=path_or_repo_id, filenames=[filename], **kwargs) file = file[0] if file is not None else file return file def cached_files( path_or_repo_id: Union[str, os.PathLike], filenames: list[str], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: Optional[bool] = None, proxies: Optional[dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", repo_type: Optional[str] = None, user_agent: Optional[Union[str, dict[str, str]]] = None, _raise_exceptions_for_gated_repo: bool = True, _raise_exceptions_for_missing_entries: bool = True, _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, **deprecated_kwargs, ) -> Optional[str]: """ Tries to locate several files in a local folder and repo, downloads and cache them if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filenames (`list[str]`): The name of all the files to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. 
proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. repo_type (`str`, *optional*): Specify the repo type (useful when downloading from a space for instance). Private args: _raise_exceptions_for_gated_repo (`bool`): if False, do not raise an exception for gated repo error but return None. _raise_exceptions_for_missing_entries (`bool`): if False, do not raise an exception for missing entries but return None. _raise_exceptions_for_connection_errors (`bool`): if False, do not raise an exception for connection errors but return None. _commit_hash (`str`, *optional*): passed when we are chaining several calls to various files (e.g. when loading a tokenizer or a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. <Tip> Passing `token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("google-bert/bert-base-uncased", "pytorch_model.bin") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if subfolder is None: subfolder = "" # Add folder to filenames full_filenames = [os.path.join(subfolder, file) for file in filenames] path_or_repo_id = str(path_or_repo_id) existing_files = [] for filename in full_filenames: if os.path.isdir(path_or_repo_id): resolved_file = os.path.join(path_or_repo_id, filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries and filename != os.path.join(subfolder, "config.json"): revision_ = "main" if revision is None else revision raise OSError( f"{path_or_repo_id} does not appear to have a file named {filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/tree/{revision_}' for available files." 
) else: return None existing_files.append(resolved_file) # All files exist if len(existing_files) == len(full_filenames): return existing_files if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) existing_files = [] file_counter = 0 if _commit_hash is not None and not force_download: for filename in full_filenames: # If the file is cached under that commit hash, we return it directly. resolved_file = try_to_load_from_cache( path_or_repo_id, filename, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: file_counter += 1 existing_files.append(resolved_file) elif not _raise_exceptions_for_missing_entries: file_counter += 1 else: raise OSError(f"Could not locate {filename} inside {path_or_repo_id}.") # Either all the files were found, or some were _CACHED_NO_EXIST but we do not raise for missing entries if file_counter == len(full_filenames): return existing_files if len(existing_files) > 0 else None user_agent = http_user_agent(user_agent) # download the files if needed try: if len(full_filenames) == 1: # This is slightly better for only 1 file hf_hub_download( path_or_repo_id, filenames[0], subfolder=None if len(subfolder) == 0 else subfolder, repo_type=repo_type, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) else: snapshot_download( path_or_repo_id, allow_patterns=full_filenames, repo_type=repo_type, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, token=token, local_files_only=local_files_only, ) except Exception as e: # We cannot recover from them if isinstance(e, RepositoryNotFoundError) and not isinstance(e, GatedRepoError): raise OSError( f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token " "having permission to this repo either by logging in with `hf auth login` or by passing " "`token=<your_token>`" ) from e elif isinstance(e, RevisionNotFoundError): raise OSError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." ) from e elif isinstance(e, PermissionError): raise OSError( f"PermissionError at {e.filename} when downloading {path_or_repo_id}. " "Check cache directory permissions. Common causes: 1) another user is downloading the same model (please wait); " "2) a previous download was canceled and the lock file needs manual removal." ) from e # Now we try to recover if we can find all files correctly in the cache resolved_files = [ _get_cache_file_to_return(path_or_repo_id, filename, cache_dir, revision, repo_type) for filename in full_filenames ] if all(file is not None for file in resolved_files): return resolved_files # Raise based on the flags. 
Note that we will raise for missing entries at the very end, even when # not entering this Except block, as it may also happen when `snapshot_download` does not raise if isinstance(e, GatedRepoError): if not _raise_exceptions_for_gated_repo: return None raise OSError( "You are trying to access a gated repo.\nMake sure to have access to it at " f"https://huggingface.co/{path_or_repo_id}.\n{str(e)}" ) from e elif isinstance(e, LocalEntryNotFoundError): if not _raise_exceptions_for_connection_errors: return None # Here we only raise if both flags for missing entry and connection errors are True (because it can be raised # even when `local_files_only` is True, in which case raising for connections errors only would not make sense) elif _raise_exceptions_for_missing_entries: raise OSError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load the files, and couldn't find them in the" f" cached files.\nCheck your internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) from e # snapshot_download will not raise EntryNotFoundError, but hf_hub_download can. If this is the case, it will be treated # later on anyway and re-raised if needed elif isinstance(e, HTTPError) and not isinstance(e, EntryNotFoundError): if not _raise_exceptions_for_connection_errors: return None raise OSError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{e}") from e # Any other Exception type should now be re-raised, in order to provide helpful error messages and break the execution flow # (EntryNotFoundError will be treated outside this block and correctly re-raised if needed) elif not isinstance(e, EntryNotFoundError): raise e resolved_files = [ _get_cache_file_to_return(path_or_repo_id, filename, cache_dir, revision) for filename in full_filenames ] # If there are any missing file and the flag is active, raise if any(file is None for file in resolved_files) and _raise_exceptions_for_missing_entries: missing_entries = [original for original, resolved in zip(full_filenames, resolved_files) if resolved is None] # Last escape if len(resolved_files) == 1 and missing_entries[0] == os.path.join(subfolder, "config.json"): return None # Now we raise for missing entries revision_ = "main" if revision is None else revision msg = ( f"a file named {missing_entries[0]}" if len(missing_entries) == 1 else f"files named {(*missing_entries,)}" ) raise OSError( f"{path_or_repo_id} does not appear to have {msg}. Checkout 'https://huggingface.co/{path_or_repo_id}/tree/{revision_}'" " for available files." ) # Remove potential missing entries (we can silently remove them at this point based on the flags) resolved_files = [file for file in resolved_files if file is not None] # Return `None` if the list is empty, coherent with other Exception when the flag is not active resolved_files = None if len(resolved_files) == 0 else resolved_files return resolved_files def download_url(url, proxies=None): """ Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. 
Returns: `str`: The location of the temporary file where the url was downloaded. """ warnings.warn( f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in" " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note" " that this is not compatible with the caching system (your file will be downloaded at each execution) or" " multiple processes (each process will download the file in a different temporary file).", FutureWarning, ) tmp_fd, tmp_file = tempfile.mkstemp() with os.fdopen(tmp_fd, "wb") as f: http_get(url, f, proxies=proxies) return tmp_file def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, proxies: Optional[dict[str, str]] = None, token: Optional[Union[bool, str]] = None, *, local_files_only: bool = False, cache_dir: Union[str, Path, None] = None, repo_type: Optional[str] = None, **deprecated_kwargs, ): """ Checks if a repo contains a given file without downloading it. Works for remote repos and local folders. If offline mode is enabled, checks if the file exists in the cache. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip> """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token # If path to local directory, check if the file exists if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) # Else it's a repo => let's check if the file exists in local cache or on the Hub # Check if file exists in cache # This information might be outdated so it's best to also make a HEAD call (if allowed). cached_path = try_to_load_from_cache( repo_id=path_or_repo, filename=filename, revision=revision, repo_type=repo_type, cache_dir=cache_dir, ) has_file_in_cache = isinstance(cached_path, str) # If local_files_only, don't try the HEAD call if local_files_only: return has_file_in_cache # Check if the file exists try: response = get_session().head( hf_hub_url(path_or_repo, filename=filename, revision=revision, repo_type=repo_type), headers=build_hf_headers(token=token, user_agent=http_user_agent()), allow_redirects=False, proxies=proxies, timeout=10, ) except (requests.exceptions.SSLError, requests.exceptions.ProxyError): # Actually raise for those subclasses of ConnectionError raise except ( requests.exceptions.ConnectionError, requests.exceptions.Timeout, OfflineModeIsEnabled, ): return has_file_in_cache try: hf_raise_for_status(response) return True except GatedRepoError as e: logger.error(e) raise OSError( f"{path_or_repo} is a gated repository. Make sure to request access at " f"https://huggingface.co/{path_or_repo} and pass a token having permission to this repo either by " "logging in with `hf auth login` or by passing `token=<your_token>`." 
) from e except RepositoryNotFoundError as e: logger.error(e) raise OSError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.") from e except RevisionNotFoundError as e: logger.error(e) raise OSError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions." ) from e except EntryNotFoundError: return False # File does not exist except requests.HTTPError: # Any authentication/authorization error will be caught here => default to cache return has_file_in_cache class PushToHubMixin: """ A Mixin containing the functionality to push a model or tokenizer to the hub. """ def _create_repo( self, repo_id: str, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, repo_url: Optional[str] = None, organization: Optional[str] = None, ) -> str: """ Create the repo if needed, cleans up repo_id with deprecated kwargs `repo_url` and `organization`, retrieves the token. """ if repo_url is not None: warnings.warn( "The `repo_url` argument is deprecated and will be removed in v5 of Transformers. Use `repo_id` " "instead." ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_url` are both specified. Please set only the argument `repo_id`." ) repo_id = repo_url.replace(f"{HUGGINGFACE_CO_RESOLVE_ENDPOINT}/", "") if organization is not None: warnings.warn( "The `organization` argument is deprecated and will be removed in v5 of Transformers. Set your " "organization directly in the `repo_id` passed instead (`repo_id={organization}/{model_id}`)." ) if not repo_id.startswith(organization): if "/" in repo_id: repo_id = repo_id.split("/")[-1] repo_id = f"{organization}/{repo_id}" url = create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True) return url.repo_id def _get_files_timestamps(self, working_dir: Union[str, os.PathLike]): """ Returns the list of files with their last modification timestamp. """ return {f: os.path.getmtime(os.path.join(working_dir, f)) for f in os.listdir(working_dir)} def _upload_modified_files( self, working_dir: Union[str, os.PathLike], repo_id: str, files_timestamps: dict[str, float], commit_message: Optional[str] = None, token: Optional[Union[bool, str]] = None, create_pr: bool = False, revision: Optional[str] = None, commit_description: Optional[str] = None, ): """ Uploads all modified files in `working_dir` to `repo_id`, based on `files_timestamps`. 
""" if commit_message is None: if "Model" in self.__class__.__name__: commit_message = "Upload model" elif "Config" in self.__class__.__name__: commit_message = "Upload config" elif "Tokenizer" in self.__class__.__name__: commit_message = "Upload tokenizer" elif "FeatureExtractor" in self.__class__.__name__: commit_message = "Upload feature extractor" elif "Processor" in self.__class__.__name__: commit_message = "Upload processor" else: commit_message = f"Upload {self.__class__.__name__}" modified_files = [ f for f in os.listdir(working_dir) if f not in files_timestamps or os.path.getmtime(os.path.join(working_dir, f)) > files_timestamps[f] ] # filter for actual files + folders at the root level modified_files = [ f for f in modified_files if os.path.isfile(os.path.join(working_dir, f)) or os.path.isdir(os.path.join(working_dir, f)) ] operations = [] # upload standalone files for file in modified_files: if os.path.isdir(os.path.join(working_dir, file)): # go over individual files of folder for f in os.listdir(os.path.join(working_dir, file)): operations.append( CommitOperationAdd( path_or_fileobj=os.path.join(working_dir, file, f), path_in_repo=os.path.join(file, f) ) ) else: operations.append( CommitOperationAdd(path_or_fileobj=os.path.join(working_dir, file), path_in_repo=file) ) if revision is not None and not revision.startswith("refs/pr"): try: create_branch(repo_id=repo_id, branch=revision, token=token, exist_ok=True) except HfHubHTTPError as e: if e.response.status_code == 403 and create_pr: # If we are creating a PR on a repo we don't have access to, we can't create the branch. # so let's assume the branch already exists. If it's not the case, an error will be raised when # calling `create_commit` below. pass else: raise logger.info(f"Uploading the following files to {repo_id}: {','.join(modified_files)}") return create_commit( repo_id=repo_id, operations=operations, commit_message=commit_message, commit_description=commit_description, token=token, create_pr=create_pr, revision=revision, ) def push_to_hub( self, repo_id: str, use_temp_dir: Optional[bool] = None, commit_message: Optional[str] = None, private: Optional[bool] = None, token: Optional[Union[bool, str]] = None, max_shard_size: Optional[Union[int, str]] = "5GB", create_pr: bool = False, safe_serialization: bool = True, revision: Optional[str] = None, commit_description: Optional[str] = None, tags: Optional[list[str]] = None, **deprecated_kwargs, ) -> str: """ Upload the {object_files} to the 🤗 Model Hub. Parameters: repo_id (`str`): The name of the repository you want to push your {object} to. It should contain your organization name when pushing to a given organization. use_temp_dir (`bool`, *optional*): Whether or not to use a temporary directory to store the files saved before they are pushed to the Hub. Will default to `True` if there is no directory named like `repo_id`, `False` otherwise. commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload {object}"`. private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `hf auth login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` is not specified. 
max_shard_size (`int` or `str`, *optional*, defaults to `"5GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). We default it to `"5GB"` so that users can easily load models on free-tier Google Colab instances without any CPU OOM issues. create_pr (`bool`, *optional*, defaults to `False`): Whether or not to create a PR with the uploaded files or directly commit. safe_serialization (`bool`, *optional*, defaults to `True`): Whether or not to convert the model weights in safetensors format for safer serialization. revision (`str`, *optional*): Branch to push the uploaded files to. commit_description (`str`, *optional*): The description of the commit that will be created tags (`list[str]`, *optional*): List of tags to push on the Hub. Examples: ```python from transformers import {object_class} {object} = {object_class}.from_pretrained("google-bert/bert-base-cased") # Push the {object} to your namespace with the name "my-finetuned-bert". {object}.push_to_hub("my-finetuned-bert") # Push the {object} to an organization with the name "my-finetuned-bert". {object}.push_to_hub("huggingface/my-finetuned-bert") ``` """ use_auth_token = deprecated_kwargs.pop("use_auth_token", None) ignore_metadata_errors = deprecated_kwargs.pop("ignore_metadata_errors", False) save_jinja_files = deprecated_kwargs.pop( "save_jinja_files", None ) # TODO: This is only used for testing and should be removed once save_jinja_files becomes the default if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token repo_path_or_name = deprecated_kwargs.pop("repo_path_or_name", None) if repo_path_or_name is not None: # Should use `repo_id` instead of `repo_path_or_name`. When using `repo_path_or_name`, we try to infer # repo_id from the folder path, if it exists. warnings.warn( "The `repo_path_or_name` argument is deprecated and will be removed in v5 of Transformers. Use " "`repo_id` instead.", FutureWarning, ) if repo_id is not None: raise ValueError( "`repo_id` and `repo_path_or_name` are both specified. Please set only the argument `repo_id`." ) if os.path.isdir(repo_path_or_name): # repo_path: infer repo_id from the path repo_id = repo_path_or_name.split(os.path.sep)[-1] working_dir = repo_id else: # repo_name: use it as repo_id repo_id = repo_path_or_name working_dir = repo_id.split("/")[-1] else: # Repo_id is passed correctly: infer working_dir from it working_dir = repo_id.split("/")[-1] # Deprecation warning will be sent after for repo_url and organization repo_url = deprecated_kwargs.pop("repo_url", None) organization = deprecated_kwargs.pop("organization", None) repo_id = self._create_repo( repo_id, private=private, token=token, repo_url=repo_url, organization=organization ) # Create a new empty model card and eventually tag it model_card = create_and_tag_model_card( repo_id, tags, token=token, ignore_metadata_errors=ignore_metadata_errors ) if use_temp_dir is None: use_temp_dir = not os.path.isdir(working_dir) with working_or_temp_dir(working_dir=working_dir, use_temp_dir=use_temp_dir) as work_dir: files_timestamps = self._get_files_timestamps(work_dir) # Save all files. 
if save_jinja_files: self.save_pretrained( work_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization, save_jinja_files=True, ) else: self.save_pretrained(work_dir, max_shard_size=max_shard_size, safe_serialization=safe_serialization) # Update model card if needed: model_card.save(os.path.join(work_dir, "README.md")) return self._upload_modified_files( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token, create_pr=create_pr, revision=revision, commit_description=commit_description, ) def send_example_telemetry(example_name, *example_args, framework="pytorch"): """ Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. """ if is_offline_mode(): return data = {"example": example_name, "framework": framework} for args in example_args: args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} if "model_name_or_path" in args_as_dict: model_name = args_as_dict["model_name_or_path"] # Filter out local paths if not os.path.isdir(model_name): data["model_name"] = args_as_dict["model_name_or_path"] if "dataset_name" in args_as_dict: data["dataset_name"] = args_as_dict["dataset_name"] elif "task_name" in args_as_dict: # Extract script name from the example_name script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") script_name = script_name.replace("_no_trainer", "") data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" # Send telemetry in the background send_telemetry( topic="examples", library_name="transformers", library_version=__version__, user_agent=http_user_agent(data) ) def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ if isinstance(size, int): return size if size.upper().endswith("GIB"): return int(size[:-3]) * (2**30) if size.upper().endswith("MIB"): return int(size[:-3]) * (2**20) if size.upper().endswith("KIB"): return int(size[:-3]) * (2**10) if size.upper().endswith("GB"): int_size = int(size[:-2]) * (10**9) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("MB"): int_size = int(size[:-2]) * (10**6) return int_size // 8 if size.endswith("b") else int_size if size.upper().endswith("KB"): int_size = int(size[:-2]) * (10**3) return int_size // 8 if size.endswith("b") else int_size raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.") def get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, force_download=False, proxies=None, resume_download=None, local_files_only=False, token=None, user_agent=None, revision=None, subfolder="", _commit_hash=None, **deprecated_kwargs, ): """ For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. 
`index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). """ import json use_auth_token = deprecated_kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") token = use_auth_token if not os.path.isfile(index_filename): raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") with open(index_filename) as f: index = json.loads(f.read()) shard_filenames = sorted(set(index["weight_map"].values())) sharded_metadata = index["metadata"] sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) sharded_metadata["weight_map"] = index["weight_map"].copy() # First, let's deal with local folder. if os.path.isdir(pretrained_model_name_or_path): shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames] return shard_filenames, sharded_metadata # At this stage pretrained_model_name_or_path is a model identifier on the Hub. Try to get everything from cache, # or download the files cached_filenames = cached_files( pretrained_model_name_or_path, shard_filenames, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=_commit_hash, ) return cached_filenames, sharded_metadata def create_and_tag_model_card( repo_id: str, tags: Optional[list[str]] = None, token: Optional[str] = None, ignore_metadata_errors: bool = False, ): """ Creates or loads an existing model card and tags it. Args: repo_id (`str`): The repo_id where to look for the model card. tags (`list[str]`, *optional*): The list of tags to add in the model card token (`str`, *optional*): Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token. ignore_metadata_errors (`bool`, *optional*, defaults to `False`): If True, errors while parsing the metadata section will be ignored. Some information might be lost during the process. Use it at your own risk. """ try: # Check if the model card is present on the remote repo model_card = ModelCard.load(repo_id, token=token, ignore_metadata_errors=ignore_metadata_errors) except EntryNotFoundError: # Otherwise create a simple model card from template model_description = "This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated." card_data = ModelCardData(tags=[] if tags is None else tags, library_name="transformers") model_card = ModelCard.from_template(card_data, model_description=model_description) if tags is not None: # Ensure model_card.data.tags is a list and not None if model_card.data.tags is None: model_card.data.tags = [] for model_tag in tags: if model_tag not in model_card.data.tags: model_card.data.tags.append(model_tag) return model_card class PushInProgress: """ Internal class to keep track of a push in progress (which might contain multiple `Future` jobs). 
""" def __init__(self, jobs: Optional[futures.Future] = None) -> None: self.jobs = [] if jobs is None else jobs def is_done(self): return all(job.done() for job in self.jobs) def wait_until_done(self): futures.wait(self.jobs) def cancel(self) -> None: self.jobs = [ job for job in self.jobs # Cancel the job if it wasn't started yet and remove cancelled/done jobs from the list if not (job.cancel() or job.done()) ]
transformers/src/transformers/utils/hub.py/0
{ "file_path": "transformers/src/transformers/utils/hub.py", "repo_id": "transformers", "token_count": 21710 }
560
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# How to add a new example script in 🤗 Transformers

This folder provides a template for adding a new example script implementing a training or inference task with the models in the 🤗 Transformers library. To use it, you will need to install cookiecutter:

```bash
pip install cookiecutter
```

or refer to the installation page of the [cookiecutter documentation](https://cookiecutter.readthedocs.io/).

You can then run the following command inside the `examples` folder of the transformers repo:

```bash
cookiecutter ../templates/adding_a_new_example_script/
```

and answer the questions asked, which will generate a new folder where you will find a pre-filled template for your example, following the best practices we recommend.

Adjust the way the data is preprocessed, the model is loaded, or the Trainer is instantiated. Then, when you're happy, add a `README.md` in the folder (or complete the existing one if you added a script to an existing folder) telling a user how to run your script.

Make a PR to the 🤗 Transformers repo. Don't forget to tweet about your new example with a carbon screenshot of how to run it and tag @huggingface!
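If you already know the answers to the prompts, cookiecutter can also be run non-interactively by passing the values on the command line. The variable name below (`example_name`) is illustrative; check the template's `cookiecutter.json` for the variables it actually defines:

```bash
# Hypothetical non-interactive run; replace the key=value pair with the variables this template defines.
cookiecutter ../templates/adding_a_new_example_script/ --no-input example_name=run_my_new_task
```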
transformers/templates/adding_a_new_example_script/README.md/0
{ "file_path": "transformers/templates/adding_a_new_example_script/README.md", "repo_id": "transformers", "token_count": 444 }
561
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import subprocess from os.path import dirname from parameterized import parameterized from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, get_tests_dir, require_deepspeed, require_torch_accelerator, run_first, slow, torch_device, ) from transformers.trainer_utils import set_seed if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, get_regression_trainer, ) set_seed(42) FIXTURE_DIRECTORY = get_tests_dir("fixtures") ROOT_DIRECTORY = os.path.join(dirname(get_tests_dir())) DS_TESTS_DIRECTORY = dirname(os.path.abspath(__file__)) # default torch.distributed port DEFAULT_MASTER_PORT = "10999" T5_SMALL = "google-t5/t5-small" # *** Working Models *** ALBERT_TINY = "hf-internal-testing/tiny-albert" BART_TINY = "sshleifer/bart-tiny-random" BERT_TINY = "hf-internal-testing/tiny-bert" BIGBIRD_PEGASUS_TINY = "hf-internal-testing/tiny-random-bigbird_pegasus" BIG_BIRD_TINY = "hf-internal-testing/tiny-random-big_bird" BLENDERBOT_TINY = "hf-internal-testing/tiny-random-blenderbot" BLOOM_TINY = "bigscience/bigscience-small-testing" DEBERTA_TINY = "hf-internal-testing/tiny-random-deberta" DEBERTA_V2_TINY = "hf-internal-testing/tiny-random-deberta-v2" DISTILBERT_TINY = "sshleifer/tiny-distilbert-base-cased" ELECTRA_TINY = "hf-internal-testing/tiny-electra" FLAUBERT_TINY = "hf-internal-testing/tiny-random-flaubert" FSMT_TINY = "stas/tiny-wmt19-en-de" FUNNEL_TINY = "hf-internal-testing/tiny-random-funnel" GPT2_TINY = "sshleifer/tiny-gpt2" GPTJ_TINY = "hf-internal-testing/tiny-random-gptj" GPT_NEO_TINY = "hf-internal-testing/tiny-random-gpt_neo" LAYOUTLM_TINY = "hf-internal-testing/tiny-layoutlm" LED_TINY = "hf-internal-testing/tiny-random-led" LONGFORMER_TINY = "hf-internal-testing/tiny-random-longformer" M2M_100_TINY = "stas/tiny-m2m_100" # hf tiny model is unsuitable MARIAN_TINY = "sshleifer/tiny-marian-en-de" MBART_TINY = "sshleifer/tiny-mbart" MOBILEBERT_TINY = "hf-internal-testing/tiny-random-mobilebert" MPNET_TINY = "hf-internal-testing/tiny-random-mpnet" PEGASUS_TINY = "stas/pegasus-cnn_dailymail-tiny-random" PROPHETNET_TINY = "hf-internal-testing/tiny-random-prophetnet" ROBERTA_TINY = "sshleifer/tiny-distilroberta-base" SQUEEZEBERT_TINY = "hf-internal-testing/tiny-random-squeezebert" T5_TINY = "patrickvonplaten/t5-tiny-random" T5_V1_TINY = "hf-internal-testing/tiny-random-t5-v1.1" VIT_TINY = "hf-internal-testing/tiny-random-vit" XLM_ROBERTA_TINY = "hf-internal-testing/tiny-xlm-roberta" XLNET_TINY = "sshleifer/tiny-xlnet-base-cased" # *** To Fix *** # *** tiny model issues *** # missing model files: MT5_TINY = "hf-internal-testing/tiny-random-mt5" CAMEMBERT_TINY = "hf-internal-testing/tiny-random-camembert" OPENAI_GPT_TINY = 
"hf-internal-testing/tiny-random-openai-gpt" # missing tokenizer files CONVBERT_TINY = "hf-internal-testing/tiny-random-convbert" LAYOUTLMV2_TINY = "hf-internal-testing/tiny-random-layoutlmv2" HUBERT_TINY = "hf-internal-testing/tiny-random-hubert" # issues with tokenizer CTRL_TINY = "hf-internal-testing/tiny-random-ctrl" TRANSFO_XL_TINY = "hf-internal-testing/tiny-random-transfo-xl" # same as Salesforce/ctrl # other issues with tiny models IBERT_TINY = "hf-internal-testing/tiny-random-ibert" # multiple issues with either mlm/qa/clas REFORMER_TINY = "hf-internal-testing/tiny-random-reformer" # multiple issues with either mlm/qa/clas # *** Lacking official examples to test with *** # or not working with examples DPR_TINY = "hf-internal-testing/tiny-random-dpr" # - "dpr" examples/research_projects/rag-end2end-retriever/ RAG_TINY = "hf-internal-testing/tiny-random-rag" # - "rag" research_projects LUKE_TINY = "" # - "luke" Entities classes - no plan to make such example LXMERT_TINY = "hf-internal-testing/tiny-random-lxmert" # - "lxmert" doesn't work with run_qa.py CLIP_TINY = "hf-internal-testing/tiny-random-clip" # - "clip" nothing under pytorch examples - XXX: Suraj is working on adding some - check by end of Sep SPEECH_TO_TEXT_TINY = "hf-internal-testing/tiny-random-speech_to_text" # - "speech_to_text", nothing under pytorch examples # *** Reactive mode *** # models with low usage, unstable API, things about to change - do nothing about the following until someone runs into a problem TAPAS_TINY = "hf-internal-testing/tiny-random-tapas" # additional notes on tapas # 1. "Table must be of type pd.DataFrame" failure # TODO: new models to add: # def get_launcher(distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) return f"deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}".split() def make_task_cmds(): data_dir_samples = f"{FIXTURE_DIRECTORY}/tests_samples" data_dir_wmt = f"{data_dir_samples}/wmt_en_ro" data_dir_xsum = f"{data_dir_samples}/xsum" args_main = """ --do_train --max_train_samples 4 --per_device_train_batch_size 2 --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir """.split() # try to cover as many models as possible once (it's enough to run on one task per model) # but need a tiny model for each # # should have "{model_type.upper()}_TINY" corresponding vars defined, e.g., T5_TINY, etc. 
tasks2models = { "trans": [ "bart", "fsmt", "m2m_100", "marian", "mbart", "t5", "t5_v1", # "mt5", missing model files ], "sum": [ "pegasus", ], "clm": [ "big_bird", "bigbird_pegasus", "blenderbot", "bloom", "gpt2", "gpt_neo", "gptj", "xlm-roberta", "prophetnet", # "camembert", missing model files ], "mlm": [ "albert", "deberta", "deberta-v2", "distilbert", "electra", "flaubert", "funnel", "layoutlm", # "reformer", # multiple issues with either mlm/qa/clas ], "qa": [ "led", "longformer", "mobilebert", "mpnet", "roberta", "squeezebert", # "convbert", # missing tokenizer files # "layoutlmv2", missing model files ], "clas": [ "bert", "xlnet", # "hubert", # missing tokenizer files # "ibert", # multiple issues with either mlm/qa/clas # "transfo-xl", # tokenizer issues as Salesforce/ctrl # "Salesforce/ctrl", # tokenizer issues # "openai-community/openai-gpt", missing model files # "tapas", multiple issues ], "img_clas": [ "vit", ], } scripts_dir = f"{ROOT_DIRECTORY}/examples/pytorch" tasks = { "trans": f""" {scripts_dir}/translation/run_translation.py --train_file {data_dir_wmt}/train.json --source_lang en --target_lang ro --max_source_length 12 --max_target_length 12 """, "sum": f""" {scripts_dir}/summarization/run_summarization.py --train_file {data_dir_xsum}/sample.json --max_source_length 12 --max_target_length 12 --lang en """, "clm": f""" {scripts_dir}/language-modeling/run_clm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt --block_size 8 """, "mlm": f""" {scripts_dir}/language-modeling/run_mlm.py --train_file {FIXTURE_DIRECTORY}/sample_text.txt """, "qa": f""" {scripts_dir}/question-answering/run_qa.py --train_file {data_dir_samples}/SQUAD/sample.json """, "clas": f""" {scripts_dir}/text-classification/run_glue.py --train_file {data_dir_samples}/MRPC/train.csv --max_seq_length 12 --task_name MRPC """, "img_clas": f""" {scripts_dir}/image-classification/run_image_classification.py --dataset_name hf-internal-testing/cats_vs_dogs_sample --remove_unused_columns False --max_steps 10 --image_processor_name {DS_TESTS_DIRECTORY}/vit_feature_extractor.json --label_column_name labels """, } launcher = get_launcher(distributed=True) cmds = {} for task, args in tasks.items(): args = args.split() for model in tasks2models[task]: model_name = globals()[f"{model.upper().replace('-', '_')}_TINY"] args_model = f"--model_name_or_path {model_name}".split() cmds[f"{task}_{model}"] = launcher + args + args_model + args_main # # generation special case # if task == "gen": # launcher = f"deepspeed --num_nodes 1 --num_gpus 1".split() # args_model += f"--model_type {model}".split() # cmds[f"{task}_{model}"] = launcher + args + args_model # else: return cmds task_cmds = make_task_cmds() ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] # future preparation: # for now test just fp16, as these tests are quite slow # FP16 = "fp16" # BF16 = "bf16" # # dtypes = [FP16] # so just hardcoding --fp16 for now # if is_torch_bf16_gpu_available(): # dtypes += [BF16] def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, task_cmds.keys())) @slow @run_first @require_deepspeed @require_torch_accelerator class TestDeepSpeedModelZoo(TestCasePlus): """This 
class is for testing via an external script - can do multiple gpus""" def get_task_cmd(self, task, stage): # return a ready to run train cmd if task not in task_cmds: raise ValueError(f"don't know of task {task}, have {task_cmds.keys()}") cmd = task_cmds[task] args_ds = f"--deepspeed {self.test_file_dir_str}/ds_config_{stage}.json".split() output_dir = self.get_auto_remove_tmp_dir() args_out = f"--output_dir {output_dir}".split() cmd += args_ds + args_out return cmd, output_dir @parameterized.expand(params, name_func=parameterized_custom_name_func) def test_zero_to_fp32(self, stage, task): # testing the ability to do a run followed by recovery of full fp32 weights cmd, output_dir = self.get_task_cmd(task, stage) # 1. generate the checkpoint cmd += "--save_steps 1".split() # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die execute_subprocess_async(cmd, env=self.get_env()) # 2. test that the fp32 weights get reconsolidated chkpt_dir = f"{output_dir}/checkpoint-1" recovered_model_path = f"{chkpt_dir}/out.bin" cmd = f"{chkpt_dir}/zero_to_fp32.py {chkpt_dir} {recovered_model_path}" # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die subprocess.check_call(cmd, shell=True) assert os.path.exists(recovered_model_path), f"{recovered_model_path} was not found" # possibly could also test that the resulting saved model is usable but given that we use # random models we won't know if it's any good
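# Illustrative only: roughly what one of the commands assembled by `make_task_cmds()` above looks
# like once expanded (zero2 stage, "clm" task, tiny GPT-2); the output dir is a placeholder.
#
#   deepspeed --num_nodes 1 --num_gpus 2 --master_port 10999 \
#       examples/pytorch/language-modeling/run_clm.py \
#       --train_file tests/fixtures/sample_text.txt --block_size 8 \
#       --model_name_or_path sshleifer/tiny-gpt2 \
#       --do_train --max_train_samples 4 --per_device_train_batch_size 2 \
#       --num_train_epochs 1 --fp16 --report_to none --overwrite_output_dir \
#       --deepspeed tests/deepspeed/ds_config_zero2.json --output_dir /tmp/zero2_clm_gpt2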
transformers/tests/deepspeed/test_model_zoo.py/0
{ "file_path": "transformers/tests/deepspeed/test_model_zoo.py", "repo_id": "transformers", "token_count": 5768 }
562
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from packaging import version from transformers import AlbertConfig, AutoTokenizer, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) class AlbertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=32, embedding_size=8, hidden_size=12, num_hidden_layers=2, # this needs to be the same as `num_hidden_layers`! num_hidden_groups=2, num_attention_heads=4, intermediate_size=16, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=8, type_vocab_size=2, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels def get_config(self): return AlbertConfig( vocab_size=self.vocab_size, embedding_size=self.embedding_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, inner_group_num=1, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = AlbertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = AlbertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, 
-1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["sentence_order_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = AlbertModelTester(self) self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type 
self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "albert/albert-base-v1" model = AlbertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class AlbertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = AlbertModel.from_pretrained("albert/albert-base-v2") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4) @slow @pytest.mark.torch_export_test def test_export(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") albert_model = "albert/albert-base-v2" device = "cpu" attn_implementation = "sdpa" max_length = 64 tokenizer = AutoTokenizer.from_pretrained(albert_model) inputs = tokenizer( f"Paris is the {tokenizer.mask_token} of France.", return_tensors="pt", padding="max_length", max_length=max_length, ) model = AlbertForMaskedLM.from_pretrained( albert_model, device_map=device, attn_implementation=attn_implementation, ) logits = model(**inputs).logits eg_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices) self.assertEqual( eg_predicted_mask.split(), ["capital", "capitol", "comune", "arrondissement", "bastille"], ) exported_program = torch.export.export( model, args=(inputs["input_ids"],), kwargs={"attention_mask": inputs["attention_mask"]}, strict=True, ) result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"]) ep_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices) self.assertEqual(eg_predicted_mask, ep_predicted_mask)
transformers/tests/models/albert/test_modeling_albert.py/0
{ "file_path": "transformers/tests/models/albert/test_modeling_albert.py", "repo_id": "transformers", "token_count": 6859 }
563
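The multiple-choice check in the ALBERT tester above works by broadcasting each (batch, seq_length) input tensor to (batch, num_choices, seq_length) before feeding it to AlbertForMultipleChoice. A minimal sketch of that reshaping step, using plain torch and hypothetical sizes rather than the tester's own attributes:

```python
import torch

# Hypothetical sizes for illustration; the tester above uses its own batch_size/num_choices/seq_length.
batch_size, num_choices, seq_length, vocab_size = 2, 4, 7, 99
input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))

# unsqueeze(1) adds a choice axis, expand(...) repeats the same tokens for every choice,
# and contiguous() materialises the expanded view so the model can reshape it internally.
multiple_choice_input_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()

assert multiple_choice_input_ids.shape == (batch_size, num_choices, seq_length)
```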
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Audio Spectrogram Transformer (AST) model.""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class ASTModelTester: def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.max_length = max_length self.num_mel_bins = num_mel_bins self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.frequency_stride = frequency_stride self.time_stride = time_stride self.attn_implementation = attn_implementation # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1 num_patches = frequency_out_dimension * time_out_dimension self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, input_values, labels def get_config(self): return ASTConfig( patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, 
hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, input_values, labels): model = ASTModel(config=config) model.to(torch_device) model.eval() result = model(input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_values, labels, ) = config_and_inputs inputs_dict = {"input_values": input_values} return config, inputs_dict @require_torch class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "AudioClassificationPipelineTests": return True return False def setUp(self): self.model_tester = ASTModelTester(self) self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "MIT/ast-finetuned-audioset-10-10-0.4593" model = ASTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on some audio from AudioSet def prepare_audio(): filepath = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" ) audio, sampling_rate = torchaudio.load(filepath) return audio, sampling_rate @require_torch @require_torchaudio class ASTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( 
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593") if is_torchaudio_available() else None ) @slow def test_inference_audio_classification(self): feature_extractor = self.default_feature_extractor model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device) feature_extractor = self.default_feature_extractor audio, sampling_rate = prepare_audio() audio = audio.squeeze().numpy() inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 527)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py/0
{ "file_path": "transformers/tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py", "repo_id": "transformers", "token_count": 3995 }
564
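The AST tester above derives its expected sequence length from a conv-style output-size formula applied to the spectrogram patch grid, plus the two prepended special tokens. A small stand-alone sketch of that arithmetic, assuming the tester defaults (num_mel_bins=16, max_length=24, patch_size=2, both strides 2):

```python
def ast_sequence_length(num_mel_bins=16, max_length=24, patch_size=2, frequency_stride=2, time_stride=2):
    """Number of patch tokens plus the [CLS] and distillation tokens."""
    frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out_dimension = (max_length - patch_size) // time_stride + 1
    num_patches = frequency_out_dimension * time_out_dimension
    return num_patches + 2


# With the tester defaults: 8 * 12 patches + 2 special tokens = 98.
print(ast_sequence_length())
```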
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import BitImageProcessor if is_torchvision_available(): from transformers import BitImageProcessorFast class BitImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): super().__init__() size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def expected_output_image_shape(self, images): return self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class BitImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BitImageProcessor if is_vision_available() else None fast_image_processing_class = BitImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = BitImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, 
"center_crop")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
transformers/tests/models/bit/test_image_processing_bit.py/0
{ "file_path": "transformers/tests/models/bit/test_image_processing_bit.py", "repo_id": "transformers", "token_count": 2103 }
565
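The from_dict kwargs test above relies on the convention that an integer size override is expanded to {"shortest_edge": n} while an integer crop_size override becomes {"height": n, "width": n}. The toy helper below only illustrates that expansion; the real normalisation happens inside BitImageProcessor/BitImageProcessorFast, not in user code:

```python
def expand_size_kwargs(size, crop_size):
    """Illustrative only: mimic how integer overrides are turned into size dicts."""
    if isinstance(size, int):
        size = {"shortest_edge": size}
    if isinstance(crop_size, int):
        crop_size = {"height": crop_size, "width": crop_size}
    return size, crop_size


size, crop_size = expand_size_kwargs(42, 84)
assert size == {"shortest_edge": 42}
assert crop_size == {"height": 84, "width": 84}
```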
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch BLIP-2 model.""" import copy import inspect import tempfile import unittest import numpy as np import pytest import requests from transformers import CONFIG_MAPPING, Blip2Config, Blip2QFormerConfig, Blip2VisionConfig from transformers.testing_utils import ( Expectations, cleanup, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_gpu, require_torch_multi_accelerator, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( Blip2ForConditionalGeneration, Blip2ForImageTextRetrieval, Blip2Model, Blip2TextModelWithProjection, Blip2VisionModel, Blip2VisionModelWithProjection, ) if is_vision_available(): from PIL import Image from transformers import Blip2Processor class Blip2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Blip2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = Blip2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = 
model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as BLIP-2's vision encoder does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Blip2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Blip2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Blip2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="BLIP-2's vision encoder does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip2-opt-2.7b" model = Blip2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class Blip2QFormerModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, use_qformer_text_input=False, ): self.parent = 
parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id self.use_qformer_text_input = use_qformer_text_input def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Blip2QFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, use_qformer_text_input=self.use_qformer_text_input, ) # this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py class Blip2TextModelDecoderOnlyTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3) input_ids[:, -1] = self.eos_token_id # Eos Token attention_mask = input_ids.ne(self.pad_token_id) return config, input_ids, attention_mask def get_config(self): return CONFIG_MAPPING["opt"]( vocab_size=self.vocab_size, hidden_size=self.hidden_size, 
num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) # this model tester uses a decoder-only language model (OPT) class Blip2ForConditionalGenerationDecoderOnlyModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10, image_token_index=4, ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelDecoderOnlyTester(parent, **text_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length + num_query_tokens # need seq_length for common tests self.is_training = is_training self.num_query_tokens = num_query_tokens self.image_token_index = image_token_index def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() _, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_tokens = ( torch.ones((input_ids.shape[0], self.num_query_tokens), device=torch_device, dtype=input_ids.dtype) * self.image_token_index ) input_ids[input_ids == self.image_token_index] = self.text_model_tester.pad_token_id input_ids = torch.cat([vision_tokens, input_ids], dim=-1) vision_attention_mask = torch.ones_like(vision_tokens) attention_mask = torch.cat([vision_attention_mask, attention_mask], dim=-1) config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, image_token_index=self.image_token_index, ) def create_and_check_for_conditional_generation(self, config, input_ids, attention_mask, pixel_values): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask) expected_seq_length = self.num_query_tokens + self.text_model_tester.seq_length self.parent.assertEqual( result.logits.shape, (self.vision_model_tester.batch_size, expected_seq_length, self.text_model_tester.vocab_size), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Blip2ForConditionalGenerationDecoderOnlyTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration,) if is_torch_available() else () additional_model_inputs = ["input_ids"] fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings 
= False test_attention_outputs = False test_torchscript = False _is_composite = True def setUp(self): self.model_tester = Blip2ForConditionalGenerationDecoderOnlyModelTester(self) common_properties = ["image_token_index", "num_query_tokens", "image_text_hidden_size"] self.config_tester = ConfigTester( self, config_class=Blip2Config, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip( reason="Blip2QFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet." ) def test_eager_matches_sdpa_generate(self): pass @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested to do so when loading the model. This test only checks layer names, as SDPA layers are usually called "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model is loaded, because we manually replicate the requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info. The test tries to cover the most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test.
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model.language_model.config._attn_implementation == "sdpa") self.assertTrue(model.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model.qformer.config._attn_implementation == "eager") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.language_model.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.qformer.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Save Blip2Config and check if we can load Blip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Blip2Config and check if we can load Blip2QFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip2-opt-2.7b" model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) # overwrite because BLIP internally calls LM.generate() with embeds thus it cannot operate in no cache format def _check_generate_outputs(self, output, config, use_cache=False, num_return_sequences=1, num_beams=1): use_cache = True # force this to be True in case False is passed super()._check_generate_outputs( output, config, use_cache=use_cache, num_return_sequences=num_return_sequences, num_beams=num_beams ) # overwrite because BLIP2 cannot generate only from input ids, and requires pixel values in all cases to be present 
@pytest.mark.generate def test_left_padding_compatibility(self): # NOTE: left-padding results in small numerical differences. This is expected. # See https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535 # First, filter out models that don't support left padding # - The model must have generative capabilities if len(self.all_generative_model_classes) == 0: self.skipTest(reason="No generative architecture available for this model.") # - The model must support padding if not self.has_attentions: self.skipTest(reason="This model doesn't support padding.") # - The model must be a decoder-only architecture (encoder-based architectures use right-padding) decoder_only_classes = [] for model_class in self.all_generative_model_classes: config, _ = self.prepare_config_and_inputs_for_generate() if config.is_encoder_decoder: continue else: decoder_only_classes.append(model_class) if len(decoder_only_classes) == 0: self.skipTest(reason="No decoder-only architecture available for this model.") # - Decoder-only architectures derived from encoder-decoder models could support it in theory, but we haven't # added support for it yet. We skip these models for now. has_encoder_attributes = any( attr_name for attr_name in config.to_dict() if attr_name.startswith("encoder") and attr_name != "encoder_no_repeat_ngram_size" ) if has_encoder_attributes: self.skipTest( reason="The decoder-only derived from encoder-decoder models are not expected to support left-padding." ) # Then, test left-padding def _prepare_model_kwargs(input_ids, attention_mask, signature): model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} if "position_ids" in signature: position_ids = torch.cumsum(attention_mask, dim=-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) model_kwargs["position_ids"] = position_ids if "cache_position" in signature: cache_position = torch.arange(input_ids.shape[-1], device=torch_device) model_kwargs["cache_position"] = cache_position return model_kwargs for model_class in decoder_only_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict.get("attention_mask") pixel_values = inputs_dict["pixel_values"] if attention_mask is None: attention_mask = torch.ones_like(input_ids) model = model_class(config).to(torch_device).eval() signature = inspect.signature(model.forward).parameters.keys() # no cache as some models require special cache classes to be init outside forward model.generation_config.use_cache = False # Without padding model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, signature) next_logits_wo_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :] # With left-padding (length 32) # can hardcode pad_token to be 0 as we'll do attn masking anyway pad_token_id = ( config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0 ) pad_size = (input_ids.shape[0], 32) padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id padded_input_ids = torch.cat((padding, input_ids), dim=1) padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) model_kwargs = _prepare_model_kwargs(padded_input_ids, padded_attention_mask, signature) next_logits_with_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :] # They should result in very similar logits torch.testing.assert_close(next_logits_wo_padding, next_logits_with_padding, rtol=1e-5, 
atol=1e-5) # this class is based on `T5ModelTester` found in tests/models/t5/test_modeling_t5.py class Blip2TextModelTester: def __init__( self, parent, vocab_size=99, batch_size=12, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return CONFIG_MAPPING["t5"]( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, is_encoder_decoder=True, ) # this model tester uses an encoder-decoder language model (T5) class Blip2ModelTester: def __init__( self, parent, vision_kwargs=None, qformer_kwargs=None, text_kwargs=None, is_training=True, num_query_tokens=10, image_token_index=4, ): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {} if text_kwargs is None: text_kwargs = {} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.text_model_tester = Blip2TextModelTester(parent, **text_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length # need seq_length for common tests self.encoder_seq_length = self.text_model_tester.encoder_seq_length self.is_training = 
is_training self.num_query_tokens = num_query_tokens self.image_token_index = image_token_index def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() ( _, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, lm_labels def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), text_config=self.text_model_tester.get_config(), num_query_tokens=self.num_query_tokens, image_token_index=self.image_token_index, ) def create_and_check_for_conditional_generation( self, config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels ): model = Blip2ForConditionalGeneration(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.vocab_size, ), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, pixel_values, decoder_input_ids, decoder_attention_mask, labels, ) = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_torch class Blip2ModelTest(ModelTesterMixin, PipelineTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForConditionalGeneration, Blip2Model) if is_torch_available() else () additional_model_inputs = ["input_ids", "decoder_input_ids"] pipeline_model_mapping = ( { "feature-extraction": Blip2Model, "image-to-text": Blip2ForConditionalGeneration, "visual-question-answering": Blip2ForConditionalGeneration, "image-text-to-text": Blip2ForConditionalGeneration, } if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False _is_composite = True # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "VisualQuestionAnsweringPipelineTests": # Get `RuntimeError: "LayerNormKernelImpl" not implemented for 'Half'`. return True return False def setUp(self): self.model_tester = Blip2ModelTester(self) common_properties = ["image_token_index", "num_query_tokens", "image_text_hidden_size"] self.config_tester = ConfigTester( self, config_class=Blip2Config, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() def test_for_conditional_generation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_conditional_generation(*config_and_inputs) @unittest.skip( reason="Blip2QFormerModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet." 
) def test_eager_matches_sdpa_generate(self): pass @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): pass def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested to do so when loading the model. This test only checks layer names, as SDPA layers are usually called "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implementation" is a dict after the model is loaded, because we manually replicate the requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info. The test tries to cover the most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model.language_model.config._attn_implementation == "eager") self.assertTrue(model.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model.qformer.config._attn_implementation == "eager") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.language_model.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.qformer.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") def test_forward_signature(self): for model_class in self.all_model_classes: config, _ = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_load_vision_qformer_text_config(self): config, _ =
self.model_tester.prepare_config_and_inputs_for_common() # Save Blip2Config and check if we can load Blip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Blip2Config and check if we can load Blip2QFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "Salesforce/blip2-opt-2.7b" model = Blip2ForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) def test_get_text_features(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = { "input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), "attention_mask": torch.LongTensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).to(torch_device), "decoder_input_ids": torch.LongTensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]).to(torch_device), } model = Blip2Model(config).to(torch_device) model.eval() text_features = model.get_text_features(**inputs_dict) self.assertEqual(text_features[0].shape, (1, 10, config.text_config.vocab_size)) def test_get_image_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"] for key in keys_to_pop: inputs_dict.pop(key) model = Blip2Model(config).to(torch_device) model.eval() image_features = model.get_image_features(**inputs_dict) self.assertEqual( image_features[0].shape, ( self.model_tester.vision_model_tester.batch_size, self.model_tester.vision_model_tester.seq_length, config.vision_config.hidden_size, ), ) def test_get_qformer_features(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() keys_to_pop = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"] for key in keys_to_pop: inputs_dict.pop(key) model = Blip2Model(config).to(torch_device) model.eval() qformer_features = model.get_qformer_features(**inputs_dict) self.assertEqual( qformer_features[0].shape, (self.model_tester.vision_model_tester.batch_size, 10, config.vision_config.hidden_size), ) # override from common to deal with nested configurations (`vision_config`, `text_config` and `qformer_config`) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for key in ["vision_config", "qformer_config", "text_config"]: setattr(configs_no_init, key, _config_zero_init(getattr(configs_no_init, key))) for model_class in self.all_model_classes: model = model_class(config=copy.deepcopy(configs_no_init)) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip("T5 backbone deepcopies the configs, and fixing it would be more involved") def test_internal_model_config_and_subconfig_are_same(self): pass class Blip2TextModelWithProjectionTester: def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True): if vision_kwargs is None: vision_kwargs = {} if 
qformer_kwargs is None: qformer_kwargs = {"use_qformer_text_input": True} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.is_training = is_training self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), ) def prepare_config_and_inputs(self): _, input_ids, attention_mask = self.qformer_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def create_and_check_model(self, config, input_ids, attention_mask): model = Blip2TextModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=attention_mask, output_attentions=True, output_hidden_states=True) self.parent.assertEqual( result.last_hidden_state.shape, (self.vision_model_tester.batch_size, input_ids.shape[1], self.qformer_model_tester.hidden_size), ) self.parent.assertEqual( result.text_embeds.shape, ( self.vision_model_tester.batch_size, input_ids.shape[1], config.image_text_hidden_size, ), ) with torch.no_grad(): result2 = model( input_ids, attention_mask=attention_mask, return_dict=not config.use_return_dict, output_attentions=True, output_hidden_states=True, ) self.parent.assertTrue(torch.allclose(result.text_embeds, result2[0])) self.parent.assertTrue(torch.allclose(result.last_hidden_state, result2[1])) self.parent.assertTrue(torch.allclose(result.hidden_states[0], result2[2][0])) self.parent.assertTrue(torch.allclose(result.hidden_states[1], result2[2][1])) self.parent.assertTrue(torch.allclose(result.attentions[0], result2[3][0])) self.parent.assertTrue(torch.allclose(result.attentions[1], result2[3][1])) @require_torch class Blip2TextModelWithProjectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2TextModelWithProjection,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2TextModelWithProjectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Blip2TextModelWithProjection does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Blip2TextModelWithProjection does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2TextModelWithProjection does not have 
input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_ids", "attention_mask", "position_ids"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @slow @require_torch_accelerator def test_model_from_pretrained(self): model_name = "Salesforce/blip2-itm-vit-g" model = Blip2TextModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "text_projection")) _, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs() model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(input_ids=input_ids, attention_mask=attention_mask) self.assertEqual( outputs.text_embeds.shape, ( self.model_tester.qformer_model_tester.batch_size, input_ids.shape[1], model.config.image_text_hidden_size, ), ) class Blip2VisionModelWithProjectionTester: def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {"use_qformer_text_input": True} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.is_training = is_training self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.num_attention_heads = self.vision_model_tester.num_attention_heads self.seq_length = self.vision_model_tester.seq_length self.hidden_size = self.vision_model_tester.hidden_size self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test def get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), ) def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def create_and_check_model(self, config, pixel_values): model = Blip2VisionModelWithProjection(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values, output_attentions=True, output_hidden_states=True) self.parent.assertEqual( result.last_hidden_state.shape, ( self.vision_model_tester.batch_size, self.vision_model_tester.seq_length, self.qformer_model_tester.hidden_size, ), ) self.parent.assertEqual( result.image_embeds.shape, ( self.vision_model_tester.batch_size, config.vision_config.hidden_size, config.image_text_hidden_size, ), ) with torch.no_grad(): result2 = model( pixel_values, return_dict=not config.use_return_dict, output_attentions=True, output_hidden_states=True, ) self.parent.assertTrue(torch.allclose(result.image_embeds, result2[0])) self.parent.assertTrue(torch.allclose(result.last_hidden_state, result2[1])) self.parent.assertTrue(torch.allclose(result.hidden_states[0], result2[2][0])) 
self.parent.assertTrue(torch.allclose(result.hidden_states[1], result2[2][1])) self.parent.assertTrue(torch.allclose(result.attentions[0], result2[3][0])) self.parent.assertTrue(torch.allclose(result.attentions[1], result2[3][1])) @require_torch class Blip2VisionModelWithProjectionTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2VisionModelWithProjection,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False def setUp(self): self.model_tester = Blip2VisionModelWithProjectionTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Blip2VisionModelWithProjection does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Blip2VisionModelWithProjection does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @slow @require_torch_accelerator def test_model_from_pretrained(self): model_name = "Salesforce/blip2-itm-vit-g" model = Blip2VisionModelWithProjection.from_pretrained(model_name) self.assertIsNotNone(model) self.assertTrue(hasattr(model, "vision_projection")) _, pixel_values = self.model_tester.prepare_config_and_inputs() model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(pixel_values=pixel_values) self.assertEqual( outputs.image_embeds.shape, ( self.model_tester.vision_model_tester.batch_size, model.config.num_query_tokens, model.config.image_text_hidden_size, ), ) class Blip2TextRetrievalModelTester: def __init__(self, parent, vision_kwargs=None, qformer_kwargs=None, is_training=True): if vision_kwargs is None: vision_kwargs = {} if qformer_kwargs is None: qformer_kwargs = {"use_qformer_text_input": True} self.parent = parent self.vision_model_tester = Blip2VisionModelTester(parent, **vision_kwargs) self.qformer_model_tester = Blip2QFormerModelTester(parent, **qformer_kwargs) self.is_training = is_training self.batch_size = self.vision_model_tester.batch_size # need bs for batching_equivalence test def 
get_config(self): return Blip2Config.from_vision_qformer_text_configs( vision_config=self.vision_model_tester.get_config(), qformer_config=self.qformer_model_tester.get_config(), ) def prepare_config_and_inputs(self): _, input_ids, attention_mask = self.qformer_model_tester.prepare_config_and_inputs() _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = Blip2ForImageTextRetrieval(config).to(torch_device).eval() with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask, use_image_text_matching_head=True) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, 2), ) with torch.no_grad(): result = model(pixel_values, input_ids, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.qformer_model_tester.batch_size), ) self.parent.assertEqual( result.logits_per_text.shape, (self.qformer_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class Blip2TextRetrievalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Blip2ForImageTextRetrieval,) if is_torch_available() else () additional_model_inputs = ["input_ids"] fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Blip2TextRetrievalModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Blip2ForImageTextRetrieval does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Blip2Model does not have input/output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "input_ids", "attention_mask"] expected_arg_names.extend( ["use_image_text_matching_head"] if "use_image_text_matching_head" in arg_names else [] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_load_vision_qformer_text_config(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Save Blip2Config and check if we can load Blip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: 
config.save_pretrained(tmp_dir_name) vision_config = Blip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Blip2Config and check if we can load Blip2QFormerConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) qformer_config = Blip2QFormerConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.qformer_config.to_dict(), qformer_config.to_dict()) @slow @require_torch_accelerator def test_model_from_pretrained(self): model_name = "Salesforce/blip2-itm-vit-g" model = Blip2ForImageTextRetrieval.from_pretrained(model_name) self.assertIsNotNone(model) _, input_ids, attention_mask, pixel_values = self.model_tester.prepare_config_and_inputs() model.to(torch_device) model.eval() with torch.no_grad(): outputs = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, use_image_text_matching_head=True, ) self.assertEqual(outputs.logits_per_image.shape, (self.model_tester.qformer_model_tester.batch_size, 2)) with torch.no_grad(): outputs = model( pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, ) self.assertEqual( outputs.logits_per_image.shape, (self.model_tester.vision_model_tester.batch_size, self.model_tester.qformer_model_tester.batch_size), ) @unittest.skip(reason="Training is not yet supported") def test_training(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Training is not yet supported") def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif name == "temp": self.assertAlmostEqual( param.data.item(), 0.07, delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch @slow class Blip2ModelIntegrationTest(unittest.TestCase): def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) def test_inference_opt(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to( torch_device ) # prepare image image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = 
processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118] # fmt: skip self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual("a woman sitting on the beach with a dog", generated_text) # image and context prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16) # max_length for BLIP includes prompt length from now on, use max_new_tokens predictions = model.generate(**inputs, max_new_tokens=11) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118] # fmt: skip self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual(generated_text, "Question: which city is this? Answer: it's not a city, it's a beach") def test_inference_interpolate_pos_encoding(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to( torch_device ) processor.image_processor.size = {"height": 500, "width": 500} image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs, interpolate_pos_encoding=True) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 8, 2335, 15, 5, 4105, 50118] # fmt: skip self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual(generated_text, "a woman and dog on the beach") def test_inference_opt_batched_beam_search(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", dtype=torch.float16).to( torch_device ) # prepare image image = prepare_img() inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs, num_beams=2) # Test output (in this case, slightly different from greedy search) expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118] # fmt: skip self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual(predictions[1].tolist(), expected_ids) def test_inference_t5(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", 
dtype=torch.float16).to( torch_device ) # prepare image image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() expectations = Expectations( { ("xpu", 3): [ [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], "a woman is playing with her dog on the beach", ], ("cuda", 7): [ [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], "a woman is playing with her dog on the beach", ], } ) expected_outputs = expectations.get_expectation() # Test output self.assertEqual(predictions[0].tolist(), expected_outputs[0]) self.assertEqual(expected_outputs[1], generated_text) # image and context prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() expectations = Expectations( { ("xpu", 3): [ [0, 3, 7, 152, 2515, 11389, 3523, 1], "san francisco", ], ("cuda", 7): [ [0, 3, 7, 152, 2515, 11389, 3523, 1], "san francisco", ], } ) expected_outputs = expectations.get_expectation() # Test output self.assertEqual(predictions[0].tolist(), expected_outputs[0]) self.assertEqual(generated_text, expected_outputs[1]) def test_inference_t5_batched_beam_search(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xl", dtype=torch.float16).to( torch_device ) # prepare image image = prepare_img() inputs = processor(images=[image, image], return_tensors="pt").to(torch_device, dtype=torch.float16) predictions = model.generate(**inputs, num_beams=2) expectations = Expectations( { ("xpu", 3): [ [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], ], ("cuda", 7): [ [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], ], } ) expected_predictions = expectations.get_expectation() # Test output (in this case, slightly different from greedy search) self.assertEqual(predictions[0].tolist(), expected_predictions[0]) self.assertEqual(predictions[1].tolist(), expected_predictions[1]) @require_torch_multi_accelerator def test_inference_opt_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b") model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-opt-2.7b", dtype=torch.float16, device_map="balanced" ) # prepare image image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids = [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118] self.assertEqual(predictions[0].tolist(), [50265] * 32 + expected_ids) # 50265 is the img token id self.assertEqual("a woman sitting on the beach with a dog", generated_text) # image and context prompt = "Question: which city is this? 
Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(0, dtype=torch.float16) predictions = model.generate(**inputs, max_new_tokens=11) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids = [2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118] self.assertEqual(predictions[0].tolist(), [50265] * 32 + expected_ids) # 50265 is the img token id self.assertEqual(generated_text, "Question: which city is this? Answer: it's not a city, it's a beach") @require_torch_multi_accelerator def test_inference_t5_multi_accelerator(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") device_map = { "query_tokens": 0, "vision_model": 0, "language_model": 1, "language_projection": 0, "qformer": 0, } model = Blip2ForConditionalGeneration.from_pretrained( "Salesforce/blip2-flan-t5-xl", dtype=torch.float16, device_map=device_map ) # prepare image image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(f"{torch_device}:0", dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids_and_text = Expectations( { ("cuda", None): ([0, 2335, 1556, 28, 1782, 30, 8, 2608, 1], "woman playing with dog on the beach"), ("rocm", (9, 5)): ( [0, 3, 9, 2335, 19, 1556, 28, 160, 1782, 30, 8, 2608, 1], "a woman is playing with her dog on the beach", ), } ).get_expectation() self.assertEqual(predictions[0].tolist(), expected_ids_and_text[0]) self.assertEqual(generated_text, expected_ids_and_text[1]) # image and context prompt = "Question: which city is this? Answer:" inputs = processor(images=image, text=prompt, return_tensors="pt").to(f"{torch_device}:0", dtype=torch.float16) predictions = model.generate(**inputs) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output expected_ids_and_text = Expectations( { ("cuda", None): ([0, 3, 7, 152, 67, 839, 1], "san diego"), ("rocm", (9, 5)): ( [0, 3, 7, 152, 2515, 11389, 3523, 1], "san francisco", # TODO: check if this is ok ), } ).get_expectation() self.assertEqual(predictions[0].tolist(), expected_ids_and_text[0]) self.assertEqual(generated_text, expected_ids_and_text[1]) @require_torch_gpu def test_inference_itm(self): model_name = "Salesforce/blip2-itm-vit-g" processor = Blip2Processor.from_pretrained(model_name) model = Blip2ForImageTextRetrieval.from_pretrained(model_name).to(torch_device) image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(images=image, text=text, return_tensors="pt").to(torch_device) # forward pass out_itm = model(**inputs, use_image_text_matching_head=True) out = model(**inputs) # verify expected_scores = torch.Tensor([[0.0238, 0.9762]]) torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3) @require_torch_accelerator @require_torch_fp16 def test_inference_itm_fp16(self): model_name = "Salesforce/blip2-itm-vit-g" processor = Blip2Processor.from_pretrained(model_name) model = Blip2ForImageTextRetrieval.from_pretrained(model_name, dtype=torch.float16).to(torch_device) image = prepare_img() text = "A woman and her dog sitting in a beach" inputs = processor(images=image, text=text, return_tensors="pt").to(torch_device, dtype=torch.float16) # forward pass 
out_itm = model(**inputs, use_image_text_matching_head=True) out = model(**inputs) # verify expected_scores = torch.Tensor([[0.0239, 0.9761]]) torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu().float()), expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(out[0].cpu().float(), torch.Tensor([[0.4406]]), rtol=1e-3, atol=1e-3) @require_torch_accelerator @require_torch_fp16 def test_inference_vision_with_projection_fp16(self): model_name = "Salesforce/blip2-itm-vit-g" processor = Blip2Processor.from_pretrained(model_name) model = Blip2VisionModelWithProjection.from_pretrained(model_name, dtype=torch.float16).to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device, dtype=torch.float16) # forward pass out = model(**inputs) # verify expected_image_embeds = [ -0.093994140625, -0.075927734375, 0.031890869140625, 0.053009033203125, 0.0352783203125, -0.01190185546875, ] self.assertTrue(np.allclose(out.image_embeds[0][0][:6].tolist(), expected_image_embeds, atol=1e-3)) @require_torch_accelerator @require_torch_fp16 def test_inference_text_with_projection_fp16(self): model_name = "Salesforce/blip2-itm-vit-g" processor = Blip2Processor.from_pretrained(model_name) model = Blip2TextModelWithProjection.from_pretrained(model_name, dtype=torch.float16).to(torch_device) inputs = processor(text="a woman sitting on the beach with a dog", padding=True, return_tensors="pt").to( torch_device ) # forward pass out = model(**inputs) # verify expected_text_embeds = [ -0.1082763671875, 0.053192138671875, -0.02825927734375, 0.0169830322265625, 0.08648681640625, -0.04656982421875, ] self.assertTrue(np.allclose(out.text_embeds[0][0][:6].tolist(), expected_text_embeds, atol=1e-3))
transformers/tests/models/blip_2/test_modeling_blip_2.py/0
{ "file_path": "transformers/tests/models/blip_2/test_modeling_blip_2.py", "repo_id": "transformers", "token_count": 36686 }
566
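For orientation only (this paragraph and sketch are not part of the stored test file): the BLIP-2 integration tests above pin exact logits for the `Salesforce/blip2-itm-vit-g` checkpoint through two heads of `Blip2ForImageTextRetrieval`. A minimal, hedged sketch of that same inference path is given below; the device string, fp16 cast, and print calls are illustrative assumptions, while the checkpoint, demo image URL, `use_image_text_matching_head` flag, and `logits_per_image` output all come from the tests themselves.

```python
# Hedged sketch of the BLIP-2 image-text matching path exercised by the tests above.
# Assumes a CUDA device and that the checkpoint / demo image are reachable.
import requests
import torch
from PIL import Image

from transformers import Blip2ForImageTextRetrieval, Blip2Processor

model_name = "Salesforce/blip2-itm-vit-g"
processor = Blip2Processor.from_pretrained(model_name)
model = Blip2ForImageTextRetrieval.from_pretrained(model_name, dtype=torch.float16).to("cuda")

url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = "A woman and her dog sitting in a beach"

inputs = processor(images=image, text=text, return_tensors="pt").to("cuda", dtype=torch.float16)

with torch.no_grad():
    # ITM head: two logits (no-match / match); softmax gives a match probability.
    itm_logits = model(**inputs, use_image_text_matching_head=True).logits_per_image
    itm_probs = torch.softmax(itm_logits.float(), dim=-1)
    # Contrastive head: image-text similarity logits.
    itc_logits = model(**inputs).logits_per_image

print(itm_probs)   # tests expect roughly [[0.024, 0.976]] for this image/text pair
print(itc_logits)  # tests expect roughly [[0.44]]
```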
# coding=utf-8 # Copyright 2024, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ConversationalSpeechModel model.""" import copy import unittest import pytest from parameterized import parameterized from transformers import ( AutoProcessor, CsmConfig, CsmForConditionalGeneration, is_torch_available, ) from transformers.testing_utils import ( cleanup, require_read_token, require_torch_accelerator, slow, torch_device, ) from transformers.utils.import_utils import is_datasets_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, ids_tensor, ) if is_datasets_available(): from datasets import load_dataset if is_torch_available(): import torch class CsmModelTester: def __init__( self, parent, ignore_index=-100, batch_size=3, seq_length=7, is_training=True, depth_decoder_config={ "num_codebooks": 10, "backbone_hidden_size": 64, "vocab_size": 6, "hidden_size": 64, "intermediate_size": 128, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2, "hidden_act": "silu", "max_position_embeddings": 10, }, codec_config={ "model_type": "mimi", "audio_channels": 1, "chunk_in_sec": None, "hidden_size": 32, "num_filters": 8, "num_residual_layers": 1, "upsampling_ratios": [8, 4], "codebook_size": 64, "vector_quantization_hidden_dimension": 64, "upsample_groups": 32, "num_hidden_layers": 2, "num_attention_heads": 2, "num_key_value_heads": 2, "sliding_window": 4, "codebook_dim": 64, "use_cache": False, }, config={ "num_codebooks": 10, "vocab_size": 6, "text_vocab_size": 99, "hidden_size": 64, "intermediate_size": 64, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2, "hidden_act": "silu", "max_position_embeddings": 10, "bos_token_id": 1, "pad_token_id": 2, "eos_token_id": 3, "codebook_pad_token_id": 2, "codebook_eos_token_id": 3, }, ): self.parent = parent self.is_training = is_training self.ignore_index = ignore_index self.depth_decoder_config = depth_decoder_config self.codec_config = codec_config self.config = config self.seq_length = seq_length self.batch_size = batch_size self.num_hidden_layers = config["num_hidden_layers"] self.vocab_size = config["vocab_size"] self.hidden_size = config["hidden_size"] self.num_attention_heads = config["num_attention_heads"] self.pad_token_id = config["pad_token_id"] def get_config(self): return CsmConfig( depth_decoder_config=self.depth_decoder_config, codec_config=self.codec_config, **self.config, ) def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length, config.num_codebooks], config.vocab_size - 1) + 1 attention_mask = input_ids[..., -1].ne(1).to(torch_device) return config, input_ids, attention_mask def prepare_config_and_inputs_for_common(self): config, input_ids, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, 
"attention_mask": attention_mask} return config, inputs_dict class CsmForConditionalGenerationTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (CsmForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_headmasking = False test_resize_embeddings = False test_resize_embeddings_untied = False def setUp(self): self.model_tester = CsmModelTester(self) self.config_tester = ConfigTester(self, config_class=CsmConfig) def test_config(self): self.config_tester.run_common_tests() def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): """ Overrides [ModelTesterMixin._prepare_for_class] to handle third input_ids dimension. """ inputs_dict = copy.deepcopy(inputs_dict) if return_labels: inputs_dict["labels"] = torch.zeros( ( self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.config["num_codebooks"], ), dtype=torch.long, device=torch_device, ) return inputs_dict def _get_logits_processor_kwargs(self, do_sample=False, config=None): """ Overrides [GenerationTesterMixin._get_logits_processor_kwargs] to restrict to top_k, top_p, and temperature sampling. """ logits_processor_kwargs = {} if do_sample: logits_processor_kwargs.update( { "top_k": 10, "top_p": 0.7, "temperature": 0.7, } ) return logits_processor_kwargs def test_initialization(self): """ Overrides [ModelTesterMixin.test_initialization] because of specificities of Mimi codec model. See https://github.com/huggingface/transformers/blob/1077603410cd73ba71d64a522033574d66d64b55/tests/models/mimi/test_modeling_mimi.py#L384-L397 """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv", "input_proj", "output_proj"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _check_similar_generate_outputs(self, output_1, output_2, atol=1e-5, rtol=1e-5): """ Overrides [GenerationTesterMixin._check_similar_generate_outputs] to handle third input_ids dimension. Here we only look a the first codebook (index 0 on last dimension of the generated sequences) since returned scores are for this token. 
""" # scores doesn't include data regarding decoder input tokens decoder_input_length = output_1.sequences.shape[1] - len(output_1.scores) output_matches = output_1.sequences[..., 0] == output_2.sequences[..., 0] has_matching_outputs = output_matches.all() has_matching_scores = None if not has_matching_outputs: for batch_idx in range(output_1.sequences.shape[0]): batch_matches = output_matches[batch_idx] if batch_matches.all(): continue first_mismatch_idx = batch_matches.int().argmin() # gets the index of the first False first_mismatch_idx -= decoder_input_length output_1_first_mismatch_scores = output_1.scores[first_mismatch_idx][batch_idx] output_2_first_mismatch_scores = output_2.scores[first_mismatch_idx][batch_idx] has_matching_scores = torch.allclose( output_1_first_mismatch_scores, output_2_first_mismatch_scores, rtol=atol, atol=rtol ) if not has_matching_scores: break self.assertTrue(has_matching_outputs or has_matching_scores) @parameterized.expand([("random",), ("same",)]) @pytest.mark.generate @unittest.skip(reason="CSM does not support assisted decoding.") def test_assisted_decoding_matches_greedy_search(self, assistant_type): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support assisted decoding.") def test_assisted_decoding_sample(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support Dola decoding.") def test_dola_decoding_sample(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_beam_sample_generate(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_beam_search_generate(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_beam_search_generate_dict_output(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_beam_search_generate_dict_outputs_use_cache(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_beam_sample_generate_dict_output(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support group beam search.") def test_group_beam_search_generate(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support group beam search.") def test_group_beam_search_generate_dict_output(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support constrained beam search.") def test_constrained_beam_search_generate(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support constrained beam search.") def test_constrained_beam_search_generate_dict_output(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support contrastive search.") def test_contrastive_generate(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support contrastive search.") def test_contrastive_generate_dict_outputs_use_cache(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support contrastive search.") def test_contrastive_generate_low_memory(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support prompt lookup decoding.") def test_prompt_lookup_decoding_matches_greedy_search(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support prompt lookup decoding.") def test_prompt_lookup_decoding_stops_at_eos(self): pass @pytest.mark.skip(reason="CSM has custom embedding approach (text and audio embeddings).") def test_model_get_set_embeddings(self): pass 
@pytest.mark.skip(reason="CSM has custom embedding approach (text and audio embeddings).") def test_tie_model_weights(self): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_generate_from_inputs_embeds_1_beam_search(self, _, num_beams): pass @pytest.mark.generate @unittest.skip(reason="CSM does not support beam search.") def test_model_parallel_beam_search(self): pass @unittest.skip(reason="CSM has special embeddings that can never be tied") def test_tied_weights_keys(self): pass def _get_custom_4d_mask_test_data(self): """ Overrides [ModelTesterMixin._get_custom_4d_mask_test_data] to handle third input_ids dimension. """ # Sequence in which all but the last token is the same input_ids = torch.tensor([[0, 1, 2, 3], [0, 1, 2, 4], [0, 1, 2, 5]], device=torch_device, dtype=torch.int64) input_ids = input_ids.unsqueeze(-1).expand(-1, -1, self.model_tester.config["num_codebooks"]) position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64) # Combining common prefix with the unique ending tokens: input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0) # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_shared_prefix = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], ) # inverting the attention mask mask_dtype = torch.float32 min_dtype = torch.finfo(mask_dtype).min mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype # Creating a position_ids tensor. note the repeating figures in the end. position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix @require_read_token class CsmForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): # TODO: @eustlb, update with correct sesame's repo self.model_checkpoint = "sesame/csm-1b" def tearDown(self): cleanup(torch_device, gc_collect=True) def _load_conversation(self): ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train") ds = ds.filter(lambda x: x["conversation_id"] == 0) ds = ds.sort("turn_id") return ds[0] @slow @require_torch_accelerator def test_1b_model_integration_generate(self): """ Tests the generated tokens match the ones from the original model implementation. Such tokens are to be retreived using https://gist.github.com/eustlb/d25577a357ddcf8f4a8cd0d00baca551, which is a script that infers the original model. 
""" processor = AutoProcessor.from_pretrained(self.model_checkpoint) prompt = "<|begin_of_text|>[0]What are you working on?<|end_of_text|><|AUDIO|><|audio_eos|><|begin_of_text|>[1]I'm figuring out my budget.<|end_of_text|>" ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train") audio = ds[0]["audio"]["array"] inputs = processor(text=prompt, audio=audio, return_tensors="pt").to(torch_device) model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device) output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False) # fmt: off EXPECTED_OUTPUT_TOKENS = torch.tensor([[ [1140, 1818, 86, 1072, 1029, 1010, 796, 577, 1523, 1599, 902, 1308, 817, 232, 1860, 56, 327, 1399, 1069, 1014, 1980, 53, 407, 1841, 1559, 928, 972, 1432, 832, 1007, 1325, 371], [955, 1390, 1503, 861, 265, 1753, 91, 1690, 389, 1025, 1086, 495, 1192, 1334, 773, 1277, 957, 1388, 513, 1110, 539, 349, 1865, 1515, 806, 1514, 237, 1424, 1783, 1928, 523, 1925], [1925, 190, 654, 1538, 19, 37, 1923, 100, 1909, 1156, 1847, 1901, 975, 982, 2002, 544, 1933, 311, 79, 850, 238, 1034, 428, 1231, 764, 313, 973, 269, 1669, 1058, 1641, 891], [1721, 92, 1298, 989, 1868, 154, 386, 1115, 347, 384, 853, 1439, 970, 1369, 238, 1279, 268, 595, 2010, 1861, 723, 999, 578, 1612, 69, 121, 306, 1647, 1609, 1185, 1786, 1268], [1356, 1419, 1199, 1575, 418, 53, 1140, 805, 355, 324, 633, 199, 343, 1176, 784, 41, 268, 366, 1478, 466, 1591, 305, 1298, 1335, 1866, 1563, 1503, 1558, 1468, 852, 1244, 312], [1860, 1603, 546, 1805, 607, 160, 1528, 191, 1867, 1830, 861, 661, 1740, 1276, 218, 954, 1286, 1216, 1727, 1637, 983, 597, 1857, 65, 797, 947, 427, 476, 739, 978, 107, 1394], [1165, 1775, 177, 823, 100, 370, 521, 200, 2007, 434, 1444, 1205, 819, 1278, 31, 912, 150, 1546, 2035, 1147, 559, 1995, 639, 35, 1812, 56, 1485, 2003, 1573, 1693, 1762, 1313], [1932, 704, 907, 897, 56, 1587, 990, 1905, 2007, 256, 671, 868, 282, 1731, 460, 1055, 1309, 1880, 584, 1849, 1643, 1198, 310, 361, 789, 1657, 905, 1564, 1354, 110, 915, 1011], [1437, 1958, 1483, 313, 79, 28, 859, 397, 1783, 1693, 633, 1424, 1128, 1831, 605, 1123, 1496, 739, 1177, 498, 781, 1756, 1288, 890, 224, 1875, 279, 800, 1999, 1740, 348, 1420], [724, 870, 1344, 861, 429, 522, 1877, 1689, 771, 1468, 1952, 156, 856, 462, 18, 834, 33, 840, 1136, 2012, 1766, 1891, 2034, 1731, 624, 108, 1469, 653, 1344, 1682, 407, 515], [355, 26, 36, 1700, 1032, 293, 1799, 978, 944, 296, 1333, 1377, 664, 1249, 421, 516, 1178, 531, 1587, 899, 1, 1449, 934, 942, 1604, 1208, 1889, 710, 825, 2012, 1563, 1299], [629, 15, 551, 861, 310, 918, 149, 1689, 1464, 1950, 1900, 1502, 1503, 615, 477, 1090, 1556, 1393, 1143, 1112, 1934, 416, 1604, 1470, 1501, 1594, 903, 1400, 972, 199, 1075, 1643], [1281, 106, 1162, 1313, 115, 429, 1792, 1379, 1535, 1311, 743, 484, 333, 498, 547, 699, 1075, 1861, 1038, 1352, 166, 622, 759, 1398, 241, 138, 1330, 481, 1254, 1365, 985, 423], [9, 520, 323, 25, 1873, 716, 1414, 1413, 266, 1449, 1265, 290, 1341, 836, 674, 411, 913, 911, 637, 1038, 1097, 1158, 1009, 803, 737, 154, 1388, 938, 466, 725, 1216, 1549], [1944, 15, 62, 332, 540, 689, 106, 1805, 1303, 1787, 1724, 1011, 1515, 1442, 1197, 496, 2026, 1820, 906, 372, 322, 1413, 1305, 1674, 443, 1733, 828, 905, 1116, 1850, 1870, 786], [221, 220, 1093, 1790, 759, 1266, 1169, 1379, 572, 1859, 1155, 596, 1398, 412, 1788, 1963, 167, 89, 1011, 1489, 714, 73, 486, 780, 1136, 254, 983, 138, 386, 800, 1819, 1857], [1178, 1939, 107, 1605, 582, 1256, 420, 637, 648, 1023, 1809, 978, 1703, 278, 1668, 2044, 
1599, 1321, 1670, 1716, 1155, 56, 602, 877, 886, 220, 910, 797, 1028, 1226, 869, 811], [1432, 1926, 1197, 1687, 540, 1815, 658, 1080, 1162, 192, 315, 1713, 422, 586, 65, 947, 493, 1536, 13, 505, 1269, 456, 1042, 645, 512, 1394, 1124, 590, 1058, 1896, 1055, 1537], [905, 564, 1739, 1594, 1201, 1773, 738, 994, 239, 1686, 1528, 368, 1791, 1924, 607, 44, 1320, 552, 1862, 1578, 591, 1434, 330, 1576, 1946, 1233, 113, 445, 669, 2041, 1242, 1406], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]]) # fmt: on torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS) @slow @require_torch_accelerator def test_1b_model_integration_generate_no_audio(self): """ Tests the generated tokens match the ones from the original model implementation. Such tokens are to be retreived using https://gist.github.com/eustlb/aed822f765e928b9612e01b0d8836d69, which is a script that infers the original model. """ processor = AutoProcessor.from_pretrained(self.model_checkpoint) conversation = [ {"role": "0", "content": [{"type": "text", "text": "The past is just a story we tell ourselves."}]}, ] inputs = processor.apply_chat_template(conversation, tokenize=True, return_dict=True).to(torch_device) model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device) output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False) print(output_tokens) # fmt: off EXPECTED_OUTPUT_TOKENS = torch.tensor([[ [1656, 629, 723, 1785, 206, 1873, 1059, 1190, 1833, 240, 618, 350, 156, 109, 2010, 452, 435, 1764, 77, 654, 1133, 908, 1095, 74, 804, 494, 1760, 1343, 1312, 1464, 1657, 324], [366, 1532, 1945, 21, 145, 1428, 1417, 1987, 1793, 1444, 356, 1491, 849, 333, 788, 426, 1423, 1004, 414, 1823, 1169, 257, 1892, 696, 1572, 998, 1098, 523, 390, 1977, 546, 1692], [1343, 1382, 1288, 1744, 1685, 1154, 1837, 1156, 1680, 1641, 1479, 1548, 632, 824, 694, 2010, 671, 1251, 1822, 343, 638, 1372, 696, 1272, 144, 125, 1332, 579, 936, 77, 159, 357], [456, 1534, 349, 274, 1956, 1502, 1268, 1038, 1911, 523, 1360, 1159, 761, 293, 718, 1143, 63, 705, 168, 550, 413, 1372, 1771, 787, 631, 693, 784, 1789, 2039, 1131, 1601, 918], [456, 829, 2026, 1108, 1649, 207, 1308, 1440, 1192, 1394, 426, 546, 590, 36, 1682, 1827, 1387, 1425, 1909, 1500, 1438, 1297, 5, 888, 948, 1745, 1304, 1364, 1692, 131, 300, 1908], [2027, 1431, 1037, 1789, 1296, 1264, 1331, 1787, 1235, 1902, 1161, 1591, 590, 561, 1633, 1218, 510, 148, 1962, 118, 212, 608, 565, 1869, 583, 598, 532, 658, 1416, 9, 1172, 493], [1215, 460, 1722, 317, 1423, 716, 1589, 1177, 1927, 1860, 1756, 1552, 1674, 643, 74, 1256, 587, 1742, 771, 2028, 469, 1070, 1683, 1614, 699, 494, 2020, 139, 1365, 1171, 171, 904], [1615, 339, 323, 317, 469, 714, 104, 2015, 1407, 278, 468, 77, 2007, 650, 1630, 269, 168, 934, 1544, 58, 1487, 1373, 705, 874, 1252, 2031, 1995, 254, 1334, 1171, 1911, 1607], [1259, 693, 666, 1700, 1115, 607, 982, 769, 1106, 1500, 101, 88, 1698, 1864, 1358, 1594, 192, 153, 1868, 1654, 604, 1948, 526, 778, 172, 1664, 1966, 99, 1334, 1030, 1349, 1209], [1211, 579, 1369, 492, 1725, 203, 1125, 778, 701, 1982, 1420, 155, 736, 1145, 2018, 609, 658, 561, 1147, 923, 1794, 1753, 116, 1374, 612, 956, 1587, 392, 1062, 2047, 901, 1931], [460, 1093, 1346, 1917, 1223, 470, 271, 390, 547, 112, 143, 1633, 1030, 643, 96, 1759, 920, 1959, 75, 1280, 1630, 999, 333, 853, 1110, 1291, 1911, 57, 171, 1658, 1704, 1508], [908, 500, 393, 184, 1437, 482, 2008, 1834, 356, 1435, 1550, 1407, 1236, 109, 1167, 452, 1141, 
934, 207, 957, 660, 670, 28, 1066, 1252, 1932, 669, 906, 1904, 1820, 2043, 881], [1599, 1031, 1474, 336, 1540, 571, 437, 1440, 1616, 1365, 1412, 1246, 400, 405, 1776, 96, 296, 38, 1597, 466, 1630, 1256, 1940, 887, 1769, 294, 285, 842, 1756, 1619, 451, 1529], [1615, 339, 1722, 525, 942, 105, 1365, 670, 785, 1316, 465, 1860, 438, 968, 547, 1938, 1816, 1429, 1065, 1942, 660, 1446, 1093, 1066, 931, 121, 688, 1033, 1178, 754, 1783, 94], [912, 1354, 598, 254, 341, 1980, 1166, 585, 1302, 473, 554, 242, 174, 2030, 2011, 325, 978, 1690, 258, 396, 1831, 1768, 1291, 1699, 2001, 433, 1414, 2012, 1045, 511, 533, 1104], [80, 1791, 1062, 1136, 391, 568, 1651, 101, 959, 2043, 1683, 760, 794, 181, 570, 540, 1599, 20, 1017, 973, 1654, 396, 586, 778, 2044, 1664, 1911, 929, 66, 897, 510, 643], [1161, 1093, 161, 1296, 589, 54, 906, 981, 1927, 605, 516, 1731, 1461, 1204, 1902, 920, 1488, 177, 805, 1402, 610, 1446, 1154, 1067, 2025, 645, 762, 1715, 415, 1658, 1713, 1607], [374, 1444, 1577, 792, 1450, 628, 604, 1729, 322, 514, 1725, 540, 1070, 575, 653, 800, 250, 187, 569, 349, 354, 1573, 176, 793, 897, 359, 536, 276, 1224, 23, 145, 1287], [1184, 415, 1644, 1737, 1788, 385, 784, 1861, 1172, 1118, 367, 1156, 234, 1946, 1742, 981, 828, 1798, 1821, 361, 1148, 670, 518, 1288, 761, 1050, 1642, 1006, 1747, 840, 1599, 720], [1141, 1731, 1670, 1542, 1347, 1907, 683, 753, 1347, 68, 2031, 153, 556, 719, 736, 1759, 1131, 1073, 1747, 1730, 1487, 1137, 1869, 1624, 699, 1900, 748, 49, 1312, 735, 726, 1268], [1141, 1383, 405, 1033, 490, 488, 1102, 471, 713, 1630, 447, 703, 1495, 1001, 1855, 354, 456, 411, 786, 853, 168, 407, 116, 699, 605, 128, 532, 1076, 208, 447, 1448, 1071], [345, 1013, 948, 1728, 1837, 337, 930, 1226, 1643, 1729, 983, 1688, 2009, 435, 1358, 721, 42, 1779, 1332, 1077, 1873, 128, 1327, 125, 1226, 1704, 705, 1459, 1449, 862, 155, 1870], [336, 904, 684, 184, 1542, 714, 1752, 1180, 1373, 1816, 504, 1716, 1066, 1086, 1212, 530, 1413, 1278, 75, 1347, 82, 1623, 1307, 1717, 1861, 494, 888, 1589, 670, 1999, 905, 1430], [578, 554, 14, 523, 1016, 300, 1589, 1017, 356, 1583, 1654, 414, 449, 376, 1413, 58, 706, 963, 388, 1626, 131, 352, 1024, 1054, 2025, 1561, 77, 1589, 1486, 431, 1249, 1508], [184, 2043, 169, 1673, 580, 162, 1752, 397, 1119, 2009, 697, 150, 1475, 157, 1523, 1402, 575, 86, 1373, 1230, 1564, 1308, 626, 1093, 1603, 1446, 1390, 1543, 1778, 1142, 1357, 1831], [1484, 1987, 932, 1728, 1504, 1618, 291, 1865, 1151, 460, 1792, 141, 234, 2043, 829, 513, 435, 791, 1037, 1541, 65, 424, 1589, 1711, 312, 1306, 212, 686, 673, 984, 1914, 1549], [513, 1536, 1844, 1319, 572, 1069, 121, 735, 1949, 1211, 1362, 1027, 105, 1379, 315, 1782, 706, 1658, 1510, 1989, 1443, 1690, 822, 1614, 1194, 1460, 992, 2040, 1178, 1474, 1110, 1326], [1858, 194, 1594, 1935, 1622, 1892, 1577, 137, 1907, 2015, 757, 414, 1823, 836, 496, 530, 1385, 1503, 1065, 1554, 664, 525, 1031, 433, 69, 466, 1016, 1846, 1609, 1658, 911, 94], [1134, 1744, 323, 691, 1837, 347, 1871, 172, 811, 91, 1883, 436, 1912, 23, 1336, 1684, 519, 1612, 1219, 1402, 728, 1953, 1658, 641, 27, 1340, 436, 139, 2008, 1030, 159, 324], [1270, 1536, 1639, 414, 1387, 1170, 1067, 1701, 1414, 505, 1122, 36, 1731, 350, 1552, 1214, 1444, 30, 107, 172, 480, 1858, 655, 168, 1107, 691, 1272, 797, 1656, 548, 1407, 1375], [1270, 286, 1371, 1552, 1622, 1739, 1348, 2018, 345, 1537, 1941, 2024, 1423, 740, 284, 513, 91, 1228, 2015, 385, 992, 39, 813, 803, 2025, 497, 663, 462, 1609, 334, 927, 1470], [1718, 994, 265, 1421, 1622, 1098, 845, 1868, 832, 459, 447, 619, 1970, 929, 513, 63, 1448, 1509, 
1219, 1942, 285, 1373, 1259, 1004, 11, 1040, 1984, 57, 188, 1687, 1475, 805], [1157, 832, 480, 1225, 1019, 347, 326, 999, 125, 1542, 118, 1383, 1343, 1077, 1821, 1602, 1978, 1642, 618, 808, 692, 1953, 1353, 963, 619, 1291, 1016, 1458, 1995, 1688, 1872, 1718], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]]) # fmt: on torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS) @slow @require_torch_accelerator def test_1b_model_integration_generate_multiple_audio(self): """ Test the generated tokens match the ones from the original model implementation. Such tokens are to be retreived using https://gist.github.com/eustlb/0c94de002e1325abb61d32217f74c0f8, which is a script that infers the original model. """ processor = AutoProcessor.from_pretrained(self.model_checkpoint) ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train") conversation = [] # context for text, audio, speaker_id in zip(ds[:4]["text"], ds[:4]["audio"], ds[:4]["speaker_id"]): conversation.append( { "role": f"{speaker_id}", "content": [{"type": "text", "text": text}, {"type": "audio", "path": audio["array"]}], } ) # text prompt conversation.append({"role": f"{ds[4]['speaker_id']}", "content": [{"type": "text", "text": ds[4]["text"]}]}) inputs = processor.apply_chat_template( conversation, tokenize=True, return_dict=True, ).to(torch_device) model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device) output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False) # fmt: off EXPECTED_OUTPUT_TOKENS = torch.tensor([[ [420, 1189, 1311, 318, 359, 694, 1550, 1044, 1614, 1437, 1978, 537, 554, 1681, 147, 1225, 422, 1357, 1681, 1619, 165, 641, 1132, 1975, 1568, 406, 756, 503, 1673, 1428, 762, 781], [1848, 1412, 957, 1656, 871, 540, 1999, 175, 711, 1383, 1814, 104, 742, 1285, 733, 1251, 1165, 1915, 1392, 645, 1804, 913, 1772, 632, 376, 1507, 1132, 725, 716, 1121, 1769, 1509], [429, 1138, 895, 1018, 1099, 257, 1395, 1015, 576, 1599, 497, 19, 1858, 1437, 282, 357, 1143, 828, 1481, 70, 985, 551, 935, 278, 1102, 1453, 1902, 755, 526, 498, 1441, 1733], [546, 343, 1547, 879, 2039, 692, 1999, 1150, 1969, 1866, 1178, 199, 1913, 1738, 1530, 1728, 1193, 74, 695, 612, 1095, 1597, 1381, 683, 1385, 2045, 1069, 865, 438, 70, 1437, 318], [1741, 1621, 733, 1580, 1006, 482, 1508, 1722, 1529, 1822, 745, 552, 142, 1568, 704, 480, 214, 552, 321, 1858, 1902, 1042, 1249, 1328, 1730, 1218, 1755, 597, 670, 738, 1056, 762], [1264, 1561, 1307, 730, 1403, 688, 212, 949, 1871, 994, 1174, 674, 858, 293, 1577, 1221, 1024, 1535, 1224, 872, 509, 1971, 46, 440, 1531, 1100, 1466, 732, 964, 381, 1933, 1612], [1407, 982, 1665, 1247, 1636, 1546, 939, 882, 1999, 618, 484, 1632, 66, 430, 290, 327, 351, 1236, 687, 504, 1973, 1073, 1233, 1972, 82, 1655, 361, 1612, 861, 1085, 880, 1407], [584, 637, 304, 1805, 1683, 1381, 404, 862, 1278, 916, 1695, 370, 316, 1049, 237, 1187, 1389, 300, 680, 135, 1068, 1368, 810, 1392, 103, 1459, 1051, 644, 38, 1517, 790, 646], [471, 1984, 1333, 553, 193, 319, 1604, 1546, 153, 513, 990, 839, 1714, 1998, 984, 1882, 1055, 476, 1821, 1476, 1522, 1817, 949, 1923, 1416, 1885, 1832, 1368, 1782, 1229, 436, 918], [28, 1238, 489, 1580, 596, 1232, 840, 835, 297, 762, 474, 1106, 1761, 483, 1165, 923, 1184, 1181, 1724, 398, 1484, 860, 1945, 665, 1925, 14, 67, 1693, 1853, 1283, 1822, 1973], [20, 637, 253, 1254, 738, 188, 593, 1239, 1768, 1047, 1703, 1512, 1398, 464, 13, 161, 651, 1844, 666, 210, 1510, 1798, 614, 1649, 
1751, 341, 808, 915, 1965, 840, 778, 950], [1879, 2028, 1405, 694, 432, 2036, 612, 387, 1843, 1204, 1044, 8, 1538, 542, 1198, 598, 1131, 760, 1217, 901, 800, 1046, 136, 639, 1320, 618, 606, 707, 574, 1288, 1254, 198], [1874, 937, 1063, 1341, 254, 13, 359, 888, 1837, 1246, 980, 818, 2046, 1258, 1290, 1470, 2028, 1701, 228, 1766, 51, 93, 296, 991, 1094, 1694, 156, 1207, 401, 967, 867, 211], [1762, 426, 1749, 2004, 314, 903, 1254, 220, 1330, 1813, 534, 102, 658, 1460, 603, 1046, 402, 2005, 783, 973, 1764, 210, 1458, 803, 605, 369, 669, 352, 1964, 1549, 632, 1375], [1577, 386, 503, 1492, 604, 405, 1329, 349, 180, 875, 329, 196, 514, 1854, 925, 159, 1428, 1300, 1510, 329, 76, 1682, 1036, 854, 695, 1097, 816, 382, 1417, 697, 1693, 194], [1109, 848, 1385, 126, 1136, 979, 687, 130, 2045, 140, 562, 361, 921, 1706, 1060, 1723, 165, 1304, 203, 1067, 158, 692, 980, 313, 1896, 1812, 839, 837, 985, 116, 866, 1049], [1810, 1092, 1534, 1730, 773, 2044, 1098, 1326, 85, 249, 455, 1728, 860, 443, 1841, 1885, 1698, 864, 1747, 1083, 1591, 1785, 1577, 1001, 1025, 1837, 1504, 1839, 1900, 1932, 230, 968], [1547, 1465, 896, 794, 613, 1383, 1806, 1984, 526, 671, 100, 519, 2037, 1631, 1724, 633, 824, 994, 893, 1448, 1793, 1237, 1855, 699, 349, 143, 270, 535, 1550, 101, 22, 1311], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]]) # fmt: on torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS) @slow @require_torch_accelerator def test_1b_model_integration_generate_batched(self): """ Test the generated tokens match the ones from the original model implementation. Such tokens are to be retreived using https://gist.github.com/eustlb/bcc532b53161bc31da3d66cb07ae193f, which is a script that infers the original model. 
""" processor = AutoProcessor.from_pretrained(self.model_checkpoint) ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train") conversation = [ [ { "role": f"{ds[0]['speaker_id']}", "content": [ {"type": "text", "text": ds[0]["text"]}, {"type": "audio", "path": ds[0]["audio"]["array"]}, ], }, { "role": f"{ds[1]['speaker_id']}", "content": [ {"type": "text", "text": ds[1]["text"]}, ], }, ], [ { "role": f"{ds[0]['speaker_id']}", "content": [ {"type": "text", "text": ds[0]["text"]}, ], } ], ] inputs = processor.apply_chat_template( conversation, tokenize=True, return_dict=True, ).to(torch_device) model = CsmForConditionalGeneration.from_pretrained(self.model_checkpoint, device_map=torch_device) output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False) # fmt: off EXPECTED_OUTPUT_TOKENS = torch.tensor([ [ [1140, 1818, 1713, 1072, 1029, 1185, 697, 358, 220, 481, 1127, 1779, 817, 891, 958, 1058, 672, 495, 426, 1135, 236, 1440, 829, 2023, 1097, 94, 926, 1830, 114, 307, 235, 1190], [955, 968, 696, 676, 52, 618, 0, 1818, 1285, 143, 1733, 1268, 1317, 1510, 1027, 2033, 1276, 1744, 790, 638, 1179, 1125, 650, 266, 1180, 364, 1015, 1604, 1152, 154, 178, 284], [1925, 274, 433, 273, 1391, 1528, 1683, 1120, 976, 944, 357, 1681, 847, 1783, 546, 857, 1662, 1695, 40, 152, 2039, 1076, 994, 1743, 265, 1751, 602, 981, 483, 981, 538, 1381], [1908, 1625, 1975, 729, 1067, 1844, 837, 1849, 224, 1223, 1037, 1188, 1428, 1977, 317, 530, 990, 1670, 766, 1411, 811, 154, 433, 1645, 1565, 1291, 1390, 49, 1160, 1464, 1911, 1961], [1908, 566, 175, 1387, 1437, 1873, 1785, 1536, 961, 414, 406, 1753, 835, 284, 764, 1522, 1889, 1816, 840, 440, 756, 860, 1753, 516, 601, 1498, 280, 1425, 1904, 1540, 1074, 314], [1860, 296, 1766, 361, 1155, 1675, 528, 1975, 1286, 113, 1656, 237, 372, 580, 1571, 1958, 502, 893, 1300, 261, 313, 455, 693, 1658, 654, 1585, 1723, 721, 178, 679, 908, 1077], [1165, 1787, 1877, 1904, 85, 609, 1007, 1724, 1959, 245, 645, 463, 1321, 1695, 192, 711, 1892, 1193, 302, 1835, 69, 940, 148, 913, 110, 108, 1244, 1510, 165, 726, 745, 1746], [1405, 1410, 186, 1569, 1214, 1920, 1946, 1907, 990, 1152, 1401, 1713, 541, 115, 423, 616, 1191, 1149, 1122, 9, 303, 195, 906, 566, 1718, 668, 1637, 1975, 51, 2005, 1260, 1672], [1932, 780, 143, 110, 286, 1460, 1136, 1366, 1788, 446, 645, 587, 1708, 189, 1295, 526, 1667, 735, 707, 1215, 27, 834, 1865, 182, 1776, 1130, 528, 1523, 1156, 316, 492, 1666], [1437, 364, 314, 432, 575, 1640, 529, 1128, 973, 789, 1820, 808, 1317, 1681, 347, 471, 737, 1626, 1386, 75, 433, 517, 365, 1982, 1434, 1378, 1059, 56, 1475, 653, 1507, 861], [724, 538, 1140, 1853, 76, 402, 0, 397, 330, 1787, 1382, 682, 1134, 296, 377, 997, 705, 627, 1700, 17, 1791, 1000, 1271, 1019, 1552, 1521, 668, 534, 433, 344, 1007, 1046], [925, 1297, 1017, 1785, 1403, 520, 1603, 1908, 665, 1827, 951, 1588, 1526, 414, 1945, 1153, 1933, 1571, 1821, 104, 179, 769, 619, 117, 56, 790, 721, 992, 1284, 1495, 1459, 823], [629, 1208, 689, 924, 1617, 1100, 1028, 1231, 1708, 1582, 200, 2011, 1611, 1966, 1153, 1326, 2036, 1515, 884, 1790, 581, 549, 1491, 701, 973, 836, 2031, 1249, 1411, 365, 1946, 1552], [1281, 1305, 610, 1666, 676, 544, 1788, 315, 159, 809, 1333, 1785, 1159, 1084, 1356, 318, 1933, 854, 475, 638, 1616, 1801, 1816, 1921, 283, 1745, 814, 974, 1056, 1316, 1509, 2031], [9, 212, 1590, 163, 1289, 923, 2046, 1620, 632, 127, 963, 405, 850, 471, 1430, 108, 1845, 1196, 1928, 143, 1717, 1054, 1288, 1351, 1340, 1294, 831, 480, 1562, 2004, 483, 1776], [221, 142, 1555, 1434, 1481, 1371, 1873, 
1607, 207, 631, 1042, 1084, 472, 465, 1772, 1002, 1761, 1912, 1298, 1918, 685, 1053, 1635, 1536, 497, 55, 1432, 1394, 1512, 365, 2026, 1210], [1741, 1923, 930, 1423, 1258, 1227, 879, 1217, 1999, 422, 420, 1832, 1660, 1542, 92, 2000, 1790, 1909, 56, 695, 704, 1752, 371, 792, 625, 328, 567, 1397, 1557, 390, 1424, 14], [1178, 812, 577, 895, 1386, 339, 1467, 844, 235, 703, 551, 2021, 1592, 1042, 353, 621, 1672, 653, 2029, 103, 766, 182, 2016, 1921, 556, 1092, 1579, 626, 1950, 70, 1467, 850], [1352, 472, 577, 351, 1126, 1943, 52, 2028, 430, 1017, 1136, 645, 820, 2028, 723, 1385, 1922, 323, 106, 267, 438, 1064, 202, 1249, 244, 1962, 625, 1380, 476, 924, 1221, 1854], [905, 811, 374, 2021, 1067, 675, 927, 427, 416, 1521, 663, 77, 457, 1849, 1362, 262, 1669, 1238, 286, 102, 555, 1809, 1585, 1918, 972, 1446, 688, 523, 1904, 943, 17, 904], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], [ [1375, 203, 265, 164, 200, 1867, 976, 924, 1972, 1637, 1048, 271, 1912, 1430, 853, 1942, 260, 1642, 400, 57, 1376, 1626, 1821, 1163, 619, 777, 1076, 951, 389, 1820, 84, 1417], [914, 527, 286, 968, 305, 1314, 805, 1703, 87, 559, 1980, 1124, 1726, 36, 1139, 618, 1628, 519, 1943, 781, 400, 1265, 438, 113, 87, 856, 465, 162, 1099, 352, 1141, 274], [1408, 6, 126, 2009, 90, 996, 934, 134, 1857, 126, 602, 876, 1092, 1962, 1205, 828, 707, 1063, 393, 1533, 123, 1086, 1749, 1324, 1, 1763, 1707, 1191, 34, 1323, 1017, 1787], [1000, 683, 1630, 703, 1574, 587, 25, 1049, 213, 1270, 1641, 1072, 1892, 1634, 1603, 90, 867, 2037, 1021, 715, 206, 507, 1138, 959, 1822, 1785, 280, 1100, 1660, 251, 1903, 988], [1657, 1981, 246, 1048, 1952, 451, 305, 423, 2000, 416, 756, 1748, 7, 748, 1866, 1795, 1682, 1832, 338, 212, 1685, 518, 154, 1407, 416, 765, 776, 25, 55, 458, 612, 262], [1034, 564, 667, 1474, 1212, 350, 712, 941, 1151, 1182, 1280, 640, 924, 1722, 1816, 458, 226, 359, 1518, 102, 1203, 459, 676, 1788, 1110, 393, 1974, 1721, 795, 1459, 798, 1723], [742, 1616, 119, 653, 441, 679, 246, 1432, 486, 1615, 1191, 500, 650, 223, 687, 1765, 1875, 963, 1385, 863, 151, 1771, 458, 1170, 737, 1932, 785, 1954, 1067, 16, 1986, 2029], [1437, 1078, 1767, 1452, 1392, 45, 2010, 1664, 245, 2015, 1416, 1055, 457, 985, 740, 1594, 1562, 1838, 258, 1431, 701, 604, 1813, 352, 792, 632, 21, 895, 70, 609, 850, 1599], [983, 1961, 54, 135, 846, 711, 473, 1630, 1373, 1094, 251, 525, 632, 1014, 1594, 1594, 1752, 398, 1266, 1357, 942, 1680, 191, 874, 483, 1291, 381, 1873, 1964, 1278, 1477, 122], [1663, 1969, 1887, 113, 145, 251, 1133, 156, 245, 1641, 209, 1322, 2037, 836, 539, 667, 940, 797, 1758, 1357, 191, 1137, 587, 1699, 27, 701, 395, 99, 1682, 876, 762, 839], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ]) # fmt: on torch.testing.assert_close(output_tokens.cpu(), EXPECTED_OUTPUT_TOKENS)
transformers/tests/models/csm/test_modeling_csm.py/0
{ "file_path": "transformers/tests/models/csm/test_modeling_csm.py", "repo_id": "transformers", "token_count": 20716 }
567
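For orientation, below is a minimal sketch of the conversational generation flow that the CSM integration test above exercises. The public checkpoint name is an assumption (the test reads its checkpoint from `self.model_checkpoint`, defined outside this excerpt); every API call mirrors one that appears in the test itself.

```python
import torch
from transformers import AutoProcessor, CsmForConditionalGeneration

# Assumed public CSM checkpoint; the test itself uses self.model_checkpoint.
checkpoint = "sesame/csm-1b"
device = "cuda" if torch.cuda.is_available() else "cpu"

processor = AutoProcessor.from_pretrained(checkpoint)
model = CsmForConditionalGeneration.from_pretrained(checkpoint, device_map=device)

# A single text-only conversation; the test additionally mixes in audio turns
# via {"type": "audio", "path": <numpy array>} entries in the content list.
conversation = [
    [
        {"role": "0", "content": [{"type": "text", "text": "Hello, how are you today?"}]},
    ],
]

inputs = processor.apply_chat_template(conversation, tokenize=True, return_dict=True).to(device)

# Greedy decoding for both the backbone and the depth decoder, as in the test.
output_tokens = model.generate(**inputs, do_sample=False, depth_decoder_do_sample=False)
print(output_tokens.shape)  # (batch, generated frames, codebooks) - 32 codebooks per frame above
```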
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Data2VecAudio model.""" import unittest from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from transformers import Data2VecTextConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextModel, ) from transformers.models.data2vec.modeling_data2vec_text import ( Data2VecTextForTextEmbeddings, create_position_ids_from_input_ids, ) class Data2VecTextModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return Data2VecTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Data2VecTextModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = Data2VecTextModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = Data2VecTextForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = Data2VecTextForCausalLM(config=config).to(torch_device).eval() # make sure that ids don't start with pad token mask = input_ids.ne(config.pad_token_id).long() input_ids = input_ids * mask # 
first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) # make sure that ids don't start with pad token mask = next_tokens.ne(config.pad_token_id).long() next_tokens = next_tokens * mask next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = Data2VecTextForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = Data2VecTextForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = Data2VecTextForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = 
Data2VecTextForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Data2VecTextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( Data2VecTextForCausalLM, Data2VecTextForMaskedLM, Data2VecTextModel, Data2VecTextForSequenceClassification, Data2VecTextForTokenClassification, Data2VecTextForMultipleChoice, Data2VecTextForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": Data2VecTextModel, "fill-mask": Data2VecTextForMaskedLM, "question-answering": Data2VecTextForQuestionAnswering, "text-classification": Data2VecTextForSequenceClassification, "text-generation": Data2VecTextForCausalLM, "token-classification": Data2VecTextForTokenClassification, "zero-shot": Data2VecTextForSequenceClassification, } if is_torch_available() else {} ) model_split_percents = [0.5, 0.9] def setUp(self): self.model_tester = Data2VecTextModelTester(self) self.config_tester = ConfigTester(self, config_class=Data2VecTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" 
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "facebook/data2vec-text-base" model = Data2VecTextModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = Data2VecTextForTextEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is Data2VecTextForTextEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = Data2VecTextForTextEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) @require_torch class Data2VecTextModelIntegrationTest(TestCasePlus): @slow def test_inference_masked_lm(self): model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 50265)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. 
expected_slice = torch.tensor([[[0.2328, 0.0000, 1.1710], [2.2525, 0.0000, 1.9937], [2.1280, 0.0000, 1.8691]]]) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head(self): model = Data2VecTextModel.from_pretrained("facebook/data2vec-text-base") input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids)[0] # compare the actual values for a slice. expected_slice = torch.tensor( [[[0.1998, -0.0379, 0.0024], [-0.0971, -0.2214, -0.1798], [-0.0789, -0.2400, -0.1898]]] ) torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/data2vec/test_modeling_data2vec_text.py/0
{ "file_path": "transformers/tests/models/data2vec/test_modeling_data2vec_text.py", "repo_id": "transformers", "token_count": 9928 }
568
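As a usage note, here is a small sketch of the masked-language-modelling path that `Data2VecTextModelIntegrationTest` above checks against `facebook/data2vec-text-base`. The tokenizer is not part of the test (which feeds raw `input_ids`), so its use here is only for readability.

```python
import torch
from transformers import AutoTokenizer, Data2VecTextForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("facebook/data2vec-text-base")
model = Data2VecTextForMaskedLM.from_pretrained("facebook/data2vec-text-base")
model.eval()

text = f"The capital of France is {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # (batch, seq_len, vocab_size=50265), as asserted in the test

# Decode the highest-scoring token at the masked position.
mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```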
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Dinov2 model.""" import unittest from transformers import Dinov2Config from transformers.testing_utils import ( is_flaky, require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Dinov2Backbone, Dinov2ForImageClassification, Dinov2Model if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class Dinov2ModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, attn_implementation="eager", mask_ratio=0.5, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.attn_implementation = attn_implementation self.mask_ratio = mask_ratio # in Dinov2, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.num_masks = int(self.mask_ratio * self.seq_length) self.mask_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return Dinov2Config( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, 
initializer_range=self.initializer_range, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels): model = Dinov2Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_backbone(self, config, pixel_values, labels): model = Dinov2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) expected_size = self.image_size // config.patch_size self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = Dinov2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], expected_size, expected_size] ) # verify channels self.parent.assertEqual(len(model.channels), 1) # verify backbone works with apply_layernorm=False and reshape_hidden_states=False config.apply_layernorm = False config.reshape_hidden_states = False model = Dinov2Backbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.seq_length, self.hidden_size] ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = Dinov2ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = Dinov2ForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class Dinov2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Dinov2 does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" test_torch_exportable = True all_model_classes = ( ( Dinov2Model, Dinov2ForImageClassification, Dinov2Backbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": Dinov2Model, "image-classification": Dinov2ForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Dinov2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Dinov2Config, has_text_modality=False, hidden_size=37) @is_flaky(max_attempts=3, description="`torch.nn.init.trunc_normal_` is flaky.") def test_initialization(self): super().test_initialization() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Dinov2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Dinov2 does not support feedforward chunking yet") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model_name = "facebook/dinov2-base" model = Dinov2Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class Dinov2ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("facebook/dinov2-base") if is_vision_available() else None @slow def test_inference_no_head(self): model = Dinov2Model.from_pretrained("facebook/dinov2-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the last hidden states expected_shape = torch.Size((1, 257, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-2.2005, 
-0.4495, 1.0964], [-3.3959, -0.8942, -1.0315], [-2.9355, 1.1564, -0.7656]], device=torch_device, ) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3) @require_torch class Dinov2BackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (Dinov2Backbone,) if is_torch_available() else () config_class = Dinov2Config has_attentions = False def setUp(self): self.model_tester = Dinov2ModelTester(self)
transformers/tests/models/dinov2/test_modeling_dinov2.py/0
{ "file_path": "transformers/tests/models/dinov2/test_modeling_dinov2.py", "repo_id": "transformers", "token_count": 5179 }
569
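For reference, a minimal sketch of the feature-extraction call that `test_inference_no_head` above performs with `facebook/dinov2-base`; a synthetic grey image stands in for the COCO fixture the test loads from disk.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, Dinov2Model

processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
model = Dinov2Model.from_pretrained("facebook/dinov2-base")
model.eval()

image = Image.new("RGB", (640, 480), color=(128, 128, 128))  # placeholder image
inputs = processor(image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# 1 [CLS] token + 16 x 16 patch tokens at the processor's 224x224 crop with patch size 14,
# matching the (1, 257, 768) last_hidden_state shape the integration test asserts.
print(outputs.last_hidden_state.shape)
```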
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor if is_torchvision_available(): from transformers import DonutImageProcessorFast class DonutImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size if size is not None else {"height": 18, "width": 20} self.do_thumbnail = do_thumbnail self.do_align_axis = do_align_axis self.do_pad = do_pad self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class DonutImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = DonutImageProcessor if is_vision_available() else None fast_image_processing_class = DonutImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = DonutImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_thumbnail")) self.assertTrue(hasattr(image_processing, "do_align_long_axis")) self.assertTrue(hasattr(image_processing, "do_pad")) 
self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 20}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) # Previous config had dimensions in (width, height) order image_processor = image_processing_class.from_dict(self.image_processor_dict, size=(42, 84)) self.assertEqual(image_processor.size, {"height": 84, "width": 42}) def test_image_processor_preprocess_with_kwargs(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) height = 84 width = 42 # Previous config had dimensions in (width, height) order encoded_images = image_processing(image_inputs[0], size=(width, height), return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, height, width, ), ) @is_flaky() def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing 
image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @require_torch @require_vision class DonutImageProcessingAlignAxisTest(DonutImageProcessingTest): def setUp(self): super().setUp() self.image_processor_tester = DonutImageProcessingTester(self, do_align_axis=True)
transformers/tests/models/donut/test_image_processing_donut.py/0
{ "file_path": "transformers/tests/models/donut/test_image_processing_donut.py", "repo_id": "transformers", "token_count": 4842 }
570
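To make the preprocessing behaviour checked above easier to follow, here is a small sketch that instantiates `DonutImageProcessor` with the same options as the tester; the synthetic NumPy image is only illustrative. Note the convention the tests verify: a tuple `size` is read as `(width, height)`, while a dict is `{"height": ..., "width": ...}`.

```python
import numpy as np
from transformers import DonutImageProcessor

image_processor = DonutImageProcessor(
    do_resize=True,
    size={"height": 18, "width": 20},
    do_thumbnail=True,
    do_align_long_axis=False,
    do_pad=True,
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

image = (np.random.rand(32, 40, 3) * 255).astype(np.uint8)  # synthetic HWC image

# Resized, thumbnailed and padded to the configured height x width.
pixel_values = image_processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 20])

# As test_image_processor_preprocess_with_kwargs checks, a tuple size is (width, height).
resized = image_processor(image, size=(42, 84), return_tensors="pt").pixel_values
print(resized.shape)  # torch.Size([1, 3, 84, 42])
```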
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch EoMT model.""" import unittest import requests from transformers import AutoImageProcessor, EomtConfig, EomtForUniversalSegmentation, pipeline from transformers.testing_utils import require_torch, require_torch_accelerator, require_torch_fp16, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image class EomtForUniversalSegmentationTester: def __init__( self, parent, batch_size=2, is_training=True, image_size=40, patch_size=2, num_queries=5, num_register_tokens=19, num_labels=4, hidden_size=8, num_attention_heads=2, num_hidden_layers=4, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.num_queries = num_queries self.image_size = image_size self.patch_size = patch_size self.num_labels = num_labels self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.num_register_tokens = num_register_tokens num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def get_config(self): config = { "image_size": self.image_size, "patch_size": self.patch_size, "num_labels": self.num_labels, "hidden_size": self.hidden_size, "num_attention_heads": self.num_attention_heads, "num_hidden_layers": self.num_hidden_layers, "num_register_tokens": self.num_register_tokens, "num_queries": self.num_queries, "num_blocks": 1, } return EomtConfig(**config) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, 3, self.image_size, self.image_size]).to(torch_device) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.image_size, self.image_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, mask_labels, class_labels def prepare_config_and_inputs_for_common(self): config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def prepare_config_and_inputs_for_training(self): config, pixel_values, mask_labels, class_labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "mask_labels": mask_labels, "class_labels": class_labels} return config, inputs_dict @require_torch class EomtForUniversalSegmentationTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (EomtForUniversalSegmentation,) if is_torch_available() else () pipeline_model_mapping = {"image-segmentation": EomtForUniversalSegmentation} if is_torch_available() else 
{} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False test_torch_exportable = False def setUp(self): self.model_tester = EomtForUniversalSegmentationTester(self) self.config_tester = ConfigTester(self, config_class=EomtConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model_with_labels(self): size = (self.model_tester.image_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "mask_labels": torch.randn((2, 10, *size), device=torch_device), "class_labels": torch.zeros(2, 10, device=torch_device).long(), } config = self.model_tester.get_config() model = EomtForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # Check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip(reason="EoMT does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EoMT does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="EoMT is not a generative model") def 
test_generate_without_input_ids(self): pass @unittest.skip(reason="EoMT does not use token embeddings") def test_resize_tokens_embeddings(self): pass def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_training() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_initialization(self): # Apart from the below params, all other parameters are initialized using kaiming uniform. non_uniform_init_parms = [ "layernorm.bias", "layernorm.weight", "norm1.bias", "norm1.weight", "norm2.bias", "norm2.weight", "layer_scale1.lambda1", "layer_scale2.lambda1", "register_tokens", "cls_token", ] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if any(x in name for x in non_uniform_init_parms): self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_torch class EomtForUniversalSegmentationIntegrationTest(unittest.TestCase): def setUp(self): self.model_id = "tue-mps/coco_panoptic_eomt_large_640" @slow def test_inference(self): model = EomtForUniversalSegmentation.from_pretrained(self.model_id, device_map="auto") processor = AutoImageProcessor.from_pretrained(self.model_id) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=image, return_tensors="pt").to(model.device) with torch.inference_mode(): outputs = model(**inputs) self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134)) self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160)) # fmt: off EXPECTED_SLICE = torch.tensor([ [ 13.2540, 8.9279, 8.6631, 12.3760, 10.1429], [ -3.4815, -36.4630, -45.5604, -46.8404, -37.5099], [ -6.8689, -44.4206, -62.7591, -59.2928, -47.7035], [ -2.9380, -42.0659, -57.4382, -55.1537, -43.5142], [ -8.4387, -38.5275, -53.1383, -47.0064, -38.9667], ]).to(model.device) # fmt: on output_slice = outputs.masks_queries_logits[0, 0, :5, :5] torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2) # fmt: off EXPECTED_SLICE = torch.tensor([ [-0.6977, -6.4907, -4.1178, -6.5554, -6.6529], [-0.3650, -6.6560, -4.0143, -6.5776, -6.5879], [-0.8820, -6.7175, -3.5334, -6.8569, -6.2415], [ 0.4502, -5.3911, -3.0232, -5.9411, -6.3243], [ 0.3157, -5.6321, -2.6716, -5.5740, -5.5607], ]).to(model.device) # fmt: on output_slice = outputs.class_queries_logits[0, :5, :5] torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @require_torch_accelerator @require_torch_fp16 @slow def test_inference_fp16(self): model = EomtForUniversalSegmentation.from_pretrained(self.model_id, dtype=torch.float16, device_map="auto") processor = AutoImageProcessor.from_pretrained(self.model_id) image = 
Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=image, return_tensors="pt").to(model.device) with torch.inference_mode(): outputs = model(**inputs) self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134)) self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160)) @slow def test_semantic_segmentation_inference(self): model_id = "tue-mps/ade20k_semantic_eomt_large_512" model = EomtForUniversalSegmentation.from_pretrained(model_id, device_map="auto") processor = AutoImageProcessor.from_pretrained(model_id) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=image, return_tensors="pt").to(model.device) with torch.inference_mode(): outputs = model(**inputs) self.assertTrue(outputs.class_queries_logits.shape == (2, 100, 151)) self.assertTrue(outputs.masks_queries_logits.shape == (2, 100, 128, 128)) preds = processor.post_process_semantic_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0] self.assertTrue(preds.shape == (image.size[1], image.size[0])) # fmt: off EXPECTED_SLICE = torch.tensor([ [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39], [39, 39, 39, 39, 39, 39, 39, 39, 39, 39] ], device=model.device) # fmt: on output_slice = preds[:10, :10] torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_panoptic_segmentation_inference(self): model = EomtForUniversalSegmentation.from_pretrained(self.model_id, device_map="auto") processor = AutoImageProcessor.from_pretrained(self.model_id) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=image, return_tensors="pt").to(model.device) with torch.inference_mode(): outputs = model(**inputs) self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 134)) self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160)) preds = processor.post_process_panoptic_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0] segmentation, segments_info = preds["segmentation"], preds["segments_info"] # fmt: off EXPECTED_SLICE = torch.tensor([ [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, 2, 2, 2, 2, 2], [-1, -1, -1, 2, 2, 2, 2, 2, 2, 2], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] ], device=model.device) EXPECTED_SEGMENTS_INFO = [ {"id": 0, "label_id": 15, "score": 0.99935}, {"id": 1, "label_id": 15, "score": 0.998688}, {"id": 2, "label_id": 57, "score": 0.954325}, {"id": 3, "label_id": 65, "score": 0.997285}, {"id": 4, "label_id": 65, "score": 0.99711} ] # fmt: on output_slice = segmentation[:10, :10] torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2) for actual, expected in zip(segments_info, EXPECTED_SEGMENTS_INFO): self.assertEqual(actual["id"], expected["id"]) self.assertEqual(actual["label_id"], expected["label_id"]) 
self.assertAlmostEqual(actual["score"], expected["score"], delta=1e-3) @slow def test_instance_segmentation_inference(self): model_id = "tue-mps/coco_instance_eomt_large_640" model = EomtForUniversalSegmentation.from_pretrained(model_id, device_map="auto") processor = AutoImageProcessor.from_pretrained(model_id) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(images=image, return_tensors="pt").to(model.device) with torch.inference_mode(): outputs = model(**inputs) self.assertTrue(outputs.class_queries_logits.shape == (1, 200, 81)) self.assertTrue(outputs.masks_queries_logits.shape == (1, 200, 160, 160)) preds = processor.post_process_instance_segmentation(outputs, target_sizes=[(image.size[1], image.size[0])])[0] segmentation, segments_info = preds["segmentation"], preds["segments_info"] # fmt: off EXPECTED_SLICE = torch.tensor([ [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], [-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], [-1., -1., -1., 0., 0., 1., 1., 1., 1., 1.], [ 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.] ], device=model.device) EXPECTED_SEGMENTS_INFO = [ {'id': 0, 'label_id': 57, 'score': 0.871247}, {'id': 1, 'label_id': 57, 'score': 0.821225}, {'id': 2, 'label_id': 15, 'score': 0.976252}, {'id': 3, 'label_id': 65, 'score': 0.972960}, {'id': 4, 'label_id': 65, 'score': 0.981109}, {'id': 5, 'label_id': 15, 'score': 0.972689} ] # fmt: on output_slice = segmentation[:10, :10] torch.testing.assert_close(output_slice, EXPECTED_SLICE, rtol=1e-2, atol=1e-2) for actual, expected in zip(segments_info, EXPECTED_SEGMENTS_INFO): self.assertEqual(actual["id"], expected["id"]) self.assertEqual(actual["label_id"], expected["label_id"]) self.assertAlmostEqual(actual["score"], expected["score"], delta=1e-3) @slow def test_segmentation_pipeline(self): image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) pipe = pipeline(model=self.model_id, subtask="panoptic", device=torch_device) output = pipe(image) EXPECTED_OUTPUT_LABELS = ["cat", "cat", "couch", "remote", "remote"] output_labels = [segment["label"] for segment in output] self.assertEqual(output_labels, EXPECTED_OUTPUT_LABELS)
transformers/tests/models/eomt/test_modeling_eomt.py/0
{ "file_path": "transformers/tests/models/eomt/test_modeling_eomt.py", "repo_id": "transformers", "token_count": 9628 }
571
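Below is a brief sketch of the panoptic-segmentation inference flow that `test_panoptic_segmentation_inference` above runs end to end; the checkpoint id and image URL are both taken from the test, and the remaining calls follow the same API.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, EomtForUniversalSegmentation

model_id = "tue-mps/coco_panoptic_eomt_large_640"
processor = AutoImageProcessor.from_pretrained(model_id)
model = EomtForUniversalSegmentation.from_pretrained(model_id, device_map="auto")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
    outputs = model(**inputs)

# Resolve per-query class and mask logits into a panoptic map at the original resolution.
prediction = processor.post_process_panoptic_segmentation(
    outputs, target_sizes=[(image.size[1], image.size[0])]
)[0]
segmentation, segments_info = prediction["segmentation"], prediction["segments_info"]
print(segmentation.shape, [segment["label_id"] for segment in segments_info])
```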
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Fuyu model.""" import copy import io import unittest import pytest import requests import torch from parameterized import parameterized from transformers import FuyuConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_accelerator, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_vision_available(): from PIL import Image if is_torch_available() and is_vision_available(): from transformers import FuyuProcessor if is_torch_available(): from transformers import FuyuForCausalLM, FuyuModel class FuyuModelTester: def __init__( self, parent, batch_size=13, seq_length=7, num_image_tokens=2, image_size=30, patch_size=15, num_channels=3, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=10, image_token_id=1, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_image_tokens = num_image_tokens self.seq_length = seq_length + num_image_tokens self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.image_token_id = image_token_id self.scope = scope def prepare_config_and_inputs(self): config = self.get_config() input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids[input_ids == config.image_token_id] = self.pad_token_id input_ids[:, : self.num_image_tokens] = config.image_token_id input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = 
ids_tensor([self.batch_size, self.seq_length], self.num_labels) return config, input_ids, input_mask, sequence_labels, token_labels def get_config(self): return FuyuConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, image_token_id=self.image_token_id, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs image_patches = floats_tensor( [self.batch_size, self.num_image_tokens, config.num_channels * config.patch_size**2] ) inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask, "image_patches": image_patches} return config, inputs_dict @require_torch class FuyuModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( FuyuModel, FuyuForCausalLM, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"text-generation": FuyuForCausalLM, "image-text-to-text": FuyuForCausalLM} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_cpu_offload = False test_disk_offload = False test_model_parallel = False def setUp(self): self.model_tester = FuyuModelTester(self) def test_mismatching_image_patches(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further # two image token and two image _ = model(**curr_input_dict) # successful forward with no modifications # remove one image but leave the image token in text input_ids = curr_input_dict["input_ids"] image_patches = curr_input_dict["image_patches"][1:, ...] 
with self.assertRaises(ValueError): _ = model(input_ids=input_ids, image_patches=image_patches) # remove one image token from text input_ids = curr_input_dict["input_ids"][2:] image_patches = curr_input_dict["image_patches"] with self.assertRaises(ValueError): _ = model(input_ids=input_ids, image_patches=image_patches) @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @parameterized.expand([("random",), ("same",)]) @pytest.mark.generate @unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices") def test_assisted_decoding_matches_greedy_search(self): pass @pytest.mark.generate @unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices") def test_assisted_decoding_sample(self): pass # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model.") def test_disk_offload_bin(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model.") def test_disk_offload_safetensors(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model.") def test_model_parallelism(self): super().test_model_parallelism() @unittest.skip(reason="Fuyu `prepare_inputs_for_generation` function doesn't have cache position.") def test_generate_continue_from_inputs_embeds(): pass @unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("Persimmon backbone applies key/query norm which doesn't work with packing") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @slow @require_torch_accelerator class FuyuModelIntegrationTest(unittest.TestCase): @cached_property def default_processor(self): return FuyuProcessor.from_pretrained("adept/fuyu-8b") @cached_property def default_model(self): return FuyuForCausalLM.from_pretrained("adept/fuyu-8b", dtype="float16", device_map=torch_device) def test_greedy_generation(self): processor = self.default_processor model = self.default_model url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png" image = Image.open(io.BytesIO(requests.get(url).content)) text_prompt_coco_captioning = "Generate a coco-style caption.\n" inputs = processor(images=image, text=text_prompt_coco_captioning, return_tensors="pt").to( torch_device, torch.float16 ) generated_ids = model.generate(**inputs, max_new_tokens=10) # take the last 8 tokens (in order to skip special \n\x04 characters) and decode them generated_text = processor.batch_decode(generated_ids[:, -8:], skip_special_tokens=True)[0] self.assertEqual(generated_text, "A blue bus parked on the side of a road.") """ @slow @require_torch_accelerator def 
test_model_8b_chat_greedy_generation_bus_color(self): EXPECTED_TEXT_COMPLETION = "The bus is blue.\n|ENDOFTEXT|" text_prompt_bus_color = "What color is the bus?\n" model_inputs_bus_color = self.processor(text=text_prompt_bus_color, images=self.bus_image_pil) generated_tokens = self.model.generate(**model_inputs_bus_color, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_chart_vqa(self): EXPECTED_TEXT_TOKENS = ["The","life expectancy","at","birth","of male","s in","","20","18","is","","80",".","7",".","\n","|ENDOFTEXT|",] # fmt: skip expected_text_completion = " ".join(EXPECTED_TEXT_TOKENS) # TODO make sure the end string matches text_prompt_chart_vqa = "What is the highest life expectancy at birth of male?\n" chart_image_url = ( "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/chart.png" ) chart_image_pil = Image.open(io.BytesIO(requests.get(chart_image_url).content)) model_inputs_chart_vqa = self.processor(text=text_prompt_chart_vqa, images=chart_image_pil) generated_tokens = self.model.generate(**model_inputs_chart_vqa, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(expected_text_completion, clean_sequence) @slow @require_torch_accelerator def test_model_8b_chat_greedy_generation_bounding_box(self): EXPECTED_TEXT_COMPLETION = "\x00194213202244\x01|ENDOFTEXT|" text_prompt_bbox = "When presented with a box, perform OCR to extract text contained within it. If provided with text, generate the corresponding bounding box.\\nWilliams" # noqa: E231 bbox_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bbox_sample_image.png" bbox_image_pil = Image.open(io.BytesIO(requests.get(bbox_image_url).content)) model_inputs_bbox = self.processor(text=text_prompt_bbox, images=bbox_image_pil) generated_tokens = self.model.generate(**model_inputs_bbox, max_new_tokens=10) text = self.processor.tokenizer.batch_decode(generated_tokens) end_sequence = text[0].split("\x04")[1] clean_sequence = ( end_sequence[: end_sequence.find("|ENDOFTEXT|") + len("|ENDOFTEXT|")] if "|ENDOFTEXT|" in end_sequence else end_sequence ) self.assertEqual(EXPECTED_TEXT_COMPLETION, clean_sequence) """
transformers/tests/models/fuyu/test_modeling_fuyu.py/0
{ "file_path": "transformers/tests/models/fuyu/test_modeling_fuyu.py", "repo_id": "transformers", "token_count": 5982 }
572
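The Fuyu tester above controls exactly where image placeholder tokens appear in the prompt: it scrubs any accidental occurrences of the image token id, plants placeholders at the start of the sequence, and pairs them with one flattened patch vector each. Below is a minimal, self-contained sketch of that setup; the batch/sequence sizes and the `IMAGE_TOKEN_ID` / `PAD_TOKEN_ID` constants are illustrative stand-ins, not Fuyu's real values.

```python
import torch

BATCH_SIZE, SEQ_LEN, NUM_IMAGE_TOKENS = 2, 9, 2
VOCAB_SIZE, IMAGE_TOKEN_ID, PAD_TOKEN_ID = 99, 1, 10


def build_multimodal_test_inputs():
    # Random text ids, as ids_tensor() produces in the common test utilities.
    input_ids = torch.randint(0, VOCAB_SIZE, (BATCH_SIZE, SEQ_LEN))
    # Any id that collides with the image token by chance is remapped to padding,
    # so the number of image tokens in the prompt is fully controlled by the test.
    input_ids[input_ids == IMAGE_TOKEN_ID] = PAD_TOKEN_ID
    # The first NUM_IMAGE_TOKENS positions become image placeholders that the model
    # later swaps for projected patch embeddings.
    input_ids[:, :NUM_IMAGE_TOKENS] = IMAGE_TOKEN_ID
    # One flattened patch per placeholder: num_channels * patch_size ** 2 features.
    image_patches = torch.rand(BATCH_SIZE, NUM_IMAGE_TOKENS, 3 * 15**2)
    return input_ids, image_patches


if __name__ == "__main__":
    ids, patches = build_multimodal_test_inputs()
    assert (ids[:, :NUM_IMAGE_TOKENS] == IMAGE_TOKEN_ID).all()
    assert patches.shape == (2, 2, 675)
    print(ids.shape, patches.shape)
```

Keeping placeholder positions deterministic is what lets tests like `test_mismatching_image_patches` drop an image or an image token and assert that the mismatch is detected.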
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from huggingface_hub import hf_hub_download from transformers import GitConfig, GitProcessor, GitVisionConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, GitForCausalLM, GitModel, GitVisionModel if is_vision_available(): from PIL import Image class GitVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, patch_size=16, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return GitVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = GitVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GitVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as GIT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (GitVisionModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = GitVisionModelTester(self) self.config_tester = ConfigTester(self, config_class=GitVisionConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="GIT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/git-base" model = GitVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class GitModelTester: def __init__( self, parent, num_channels=3, image_size=32, patch_size=16, batch_size=13, text_seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.batch_size = batch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act 
self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope # make sure the BOS, EOS and PAD tokens are within the vocab self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 # for GIT, the sequence length is the sum of the text and patch tokens, + 1 due to the CLS token self.seq_length = self.text_seq_length + int((self.image_size / self.patch_size) ** 2) + 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, input_ids, input_mask, pixel_values def get_config(self): """ Returns a tiny configuration by default. """ return GitConfig( vision_config={ "num_channels": self.num_channels, "image_size": self.image_size, "patch_size": self.patch_size, "hidden_size": self.hidden_size, "projection_dim": 32, "num_hidden_layers": self.num_hidden_layers, "num_attention_heads": self.num_attention_heads, }, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) def create_and_check_model(self, config, input_ids, input_mask, pixel_values): model = GitModel(config=config) model.to(torch_device) model.eval() # inference with pixel values result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # inference without pixel values result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # inference with pixel values result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) # inference without pixel values result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.vocab_size)) # training result = model(input_ids, attention_mask=input_mask, pixel_values=pixel_values, labels=input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertTrue(result.loss.item() > 0) def 
_test_beam_search_generate(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # generate generated_ids = model.generate( input_ids, attention_mask=input_mask, pixel_values=pixel_values, do_sample=False, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def _test_batched_generate_captioning(self, config, input_ids, input_mask, pixel_values): model = GitForCausalLM(config=config) model.to(torch_device) model.eval() # generate generated_ids = model.generate( input_ids=None, # captioning -> no input_ids attention_mask=None, pixel_values=pixel_values, do_sample=False, min_length=20, max_length=20, num_beams=2, num_return_sequences=2, ) self.parent.assertEqual(generated_ids.shape, (self.batch_size * 2, 20)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, pixel_values, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": input_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class GitModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": GitModel, "image-to-text": GitForCausalLM, "text-generation": GitForCausalLM, "image-text-to-text": GitForCausalLM, } if is_torch_available() else {} ) fx_compatible = False test_torchscript = False # special case for GitForCausalLM model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_CAUSAL_LM_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def setUp(self): self.model_tester = GitModelTester(self) self.config_tester = ConfigTester(self, config_class=GitConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_beam_search_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_beam_search_generate(*config_and_inputs) def test_batched_generate_captioning(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester._test_batched_generate_captioning(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def _check_attentions_for_generate( self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values ): # GIT attention shape depends on image inputs, overwrite image_length = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1) prompt_length += image_length output_length += image_length 
super()._check_attentions_for_generate( batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False ): # GIT attention shape depends on image inputs, overwrite image_length = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1) prompt_length += image_length output_length += image_length super()._check_hidden_states_for_generate( batch_size, hidden_states, prompt_length, output_length, config, use_cache=use_cache ) @slow def test_model_from_pretrained(self): model_name = "microsoft/git-base" model = GitModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="GIT has pixel values as additional input") def test_beam_search_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_contrastive_generate_low_memory(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_greedy_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="GIT has pixel values as additional input") def test_dola_decoding_sample(self): pass @require_torch @require_vision @slow class GitModelIntegrationTest(unittest.TestCase): def test_forward_pass(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, text="hello world", return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 201, 30522)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.9514, -0.9512, -0.9507], [-0.5454, -0.5453, -0.5453], [-0.8862, -0.8857, -0.8848]], device=torch_device, ) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_inference_image_captioning(self): processor = GitProcessor.from_pretrained("microsoft/git-base") model = GitForCausalLM.from_pretrained("microsoft/git-base") model.to(torch_device) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) outputs = model.generate( pixel_values=pixel_values, max_length=20, output_scores=True, return_dict_in_generate=True ) generated_caption = processor.batch_decode(outputs.sequences, skip_special_tokens=True)[0] expected_shape = torch.Size((1, 9)) self.assertEqual(outputs.sequences.shape, expected_shape) self.assertEqual(generated_caption, "two cats laying on a pink blanket") self.assertTrue(outputs.scores[-1].shape, expected_shape) expected_slice = torch.tensor([-0.8805, -0.8803, -0.8799], device=torch_device) torch.testing.assert_close(outputs.scores[-1][0, :3], expected_slice, rtol=1e-4, atol=1e-4) def test_visual_question_answering(self): processor = GitProcessor.from_pretrained("microsoft/git-base-textvqa") model = GitForCausalLM.from_pretrained("microsoft/git-base-textvqa") model.to(torch_device) # prepare image file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", 
filename="bus.png", repo_type="dataset") image = Image.open(file_path).convert("RGB") inputs = processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # prepare question question = "what does the front of the bus say at the top?" input_ids = processor(text=question, add_special_tokens=False).input_ids input_ids = [processor.tokenizer.cls_token_id] + input_ids input_ids = torch.tensor(input_ids).unsqueeze(0).to(torch_device) generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=20) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] expected_shape = torch.Size((1, 15)) self.assertEqual(generated_ids.shape, expected_shape) self.assertEqual(generated_caption, "what does the front of the bus say at the top? special") def test_batched_generation(self): processor = GitProcessor.from_pretrained("microsoft/git-base-coco") model = GitForCausalLM.from_pretrained("microsoft/git-base-coco") model.to(torch_device) # create batch of size 2 image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(images=[image, image], return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # we have to prepare `input_ids` with the same batch size as `pixel_values` start_token_id = model.config.bos_token_id input_ids = torch.tensor([[start_token_id], [start_token_id]], device=torch_device) generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) generated_captions = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(generated_captions, ["two cats sleeping on a pink blanket next to remotes."] * 2) @slow def test_inference_interpolate_pos_encoding(self): # CLIP family models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. model = GitModel.from_pretrained("microsoft/git-base").to(torch_device) processor = GitProcessor.from_pretrained( "microsoft/git-base", size={"height": 180, "width": 180}, crop_size={"height": 180, "width": 180} ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # interpolate_pos_encodiung false should return value error with self.assertRaises(ValueError, msg="doesn't match model"): with torch.no_grad(): model(**inputs, interpolate_pos_encoding=False) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 130, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-1.0296, 2.5960, 0.8703], [1.7027, 1.3302, -0.4543], [-1.4932, -0.1084, 0.0502]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/git/test_modeling_git.py/0
{ "file_path": "transformers/tests/models/git/test_modeling_git.py", "repo_id": "transformers", "token_count": 10655 }
573
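`GitModelTester` and the `_check_attentions_for_generate` / `_check_hidden_states_for_generate` overrides above all hinge on one arithmetic fact: the visual tokens (image patches plus a CLS token) are prepended to the text tokens, so every expected length is offset by the image length. A small sketch of that arithmetic, using the tiny config values from the tester (`image_size=32`, `patch_size=16`, `text_seq_length=7`):

```python
# Back-of-the-envelope check for the sequence lengths the GIT tests assert on.
def git_expected_lengths(image_size: int, patch_size: int, text_seq_length: int) -> dict:
    num_patches = (image_size // patch_size) ** 2        # 4 patches for 32 / 16
    image_length = num_patches + 1                       # +1 for the [CLS] token
    return {
        "image_length": image_length,                    # 5
        "seq_length_with_image": text_seq_length + image_length,  # 12
        "seq_length_text_only": text_seq_length,         # 7
    }


if __name__ == "__main__":
    lengths = git_expected_lengths(image_size=32, patch_size=16, text_seq_length=7)
    assert lengths == {"image_length": 5, "seq_length_with_image": 12, "seq_length_text_only": 7}
    print(lengths)
```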
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch GLPN model.""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNModel from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class GLPNModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, decoder_hidden_size=16, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.decoder_hidden_size = decoder_hidden_size self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return GLPNConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, decoder_hidden_size=self.decoder_hidden_size, ) def 
create_and_check_model(self, config, pixel_values, labels): model = GLPNModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_depth_estimation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = GLPNForDepthEstimation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GLPNModel, GLPNForDepthEstimation) if is_torch_available() else () pipeline_model_mapping = ( {"depth-estimation": GLPNForDepthEstimation, "image-feature-extraction": GLPNModel} if is_torch_available() else {} ) test_head_masking = False test_pruning = False test_resize_embeddings = False test_torch_exportable = True def setUp(self): self.model_tester = GLPNModelTester(self) self.config_tester = GLPNConfigTester(self, config_class=GLPNConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batching_equivalence(self, atol=3e-4, rtol=3e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) def test_for_depth_estimation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs) @unittest.skip(reason="GLPN does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="GLPN does not have get_input_embeddings method and get_output_embeddings methods") def test_model_get_set_embeddings(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 
expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue # TODO: remove the following 3 lines once we have a MODEL_FOR_DEPTH_ESTIMATION_MAPPING # this can then be incorporated into _prepare_for_class in test_modeling_common.py if model_class.__name__ == "GLPNForDepthEstimation": batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): model_name = "vinvino02/glpn-kitti" model = GLPNModel.from_pretrained(model_name) 
self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision @slow class GLPNModelIntegrationTest(unittest.TestCase): @slow def test_inference_depth_estimation(self): image_processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti") model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-kitti").to(torch_device) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the predicted depth expected_shape = torch.Size([1, 480, 640]) self.assertEqual(outputs.predicted_depth.shape, expected_shape) expected_slice = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ).to(torch_device) torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/glpn/test_modeling_glpn.py/0
{ "file_path": "transformers/tests/models/glpn/test_modeling_glpn.py", "repo_id": "transformers", "token_count": 6233 }
574
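The attention-shape assertions in `GLPNModelTest.test_attention_outputs` follow from the stage-wise spatial strides and the per-stage `sr_ratios`: queries live on the downsampled grid, while keys/values are reduced further by the stage's spatial-reduction ratio. The sketch below assumes effective strides of 4, 8, 16 and 32 for the four stages (the divisors used in the assertions); the helper name is made up for illustration.

```python
# Derive the (num_heads, query_len, key_len) attention shapes the GLPN test checks,
# using the tiny config from GLPNModelTester (image_size=64, sr_ratios=[8, 4, 2, 1]).
def glpn_attention_shapes(image_size, strides, sr_ratios, num_heads):
    shapes = []
    for stride, sr, heads in zip(strides, sr_ratios, num_heads):
        query_len = (image_size // stride) ** 2            # tokens on the stage's grid
        key_len = (image_size // (stride * sr)) ** 2        # keys after spatial reduction
        shapes.append((heads, query_len, key_len))
    return shapes


if __name__ == "__main__":
    shapes = glpn_attention_shapes(
        64, strides=[4, 8, 16, 32], sr_ratios=[8, 4, 2, 1], num_heads=[1, 2, 4, 8]
    )
    # First block: a 16x16 query grid attending to a 2x2 reduced grid.
    assert shapes[0] == (1, 256, 4)
    # Last block: a 2x2 query grid with no reduction (sr_ratio = 1).
    assert shapes[-1] == (8, 4, 4)
    print(shapes)
```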
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from functools import lru_cache from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import ( VOCAB_FILES_NAMES, GPTNeoXJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, use_cache_if_possible @require_tokenizers class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "abeja/gpt-neox-japanese-2.7b" tokenizer_class = GPTNeoXJapaneseTokenizer test_rust_tokenizer = False from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} @classmethod def setUpClass(cls): super().setUpClass() vocab_tokens = [ "こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|startoftext|>", "<|endoftext|>", ] emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 cls.special_tokens_map = {"unk_token": "<unk>"} cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) cls.emoji_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) with open(cls.emoji_file, "w") as emoji_writer: emoji_writer.write(json.dumps(emoji_tokens)) @classmethod @use_cache_if_possible @lru_cache(maxsize=64) def get_tokenizer(cls, pretrained_name=None, **kwargs): kwargs.update(cls.special_tokens_map) pretrained_name = pretrained_name or cls.tmpdirname return GPTNeoXJapaneseTokenizer.from_pretrained(pretrained_name, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" output_text = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def get_clean_sequence(self, tokenizer): input_text, output_text = self.get_input_output_texts(tokenizer) ids = tokenizer.encode(output_text, add_special_tokens=False) text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) return text, ids def test_pretokenized_inputs(self): pass # TODO add if relevant def test_maximum_encoding_length_pair_input(self): pass # TODO add if relevant def test_maximum_encoding_length_single_input(self): pass # TODO add if relevant def test_full_tokenizer(self): tokenizer = self.get_tokenizer() # Testing tokenization input_text = "こんにちは、世界。 こんばんは、㔺界。" expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] tokens = tokenizer.tokenize(input_text) self.assertListEqual(tokens, expected_token) # Testing conversion to ids without special tokens expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] input_ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(input_ids, expected_ids) # Testing conversion to ids with special tokens input_tokens = tokens + 
[tokenizer.unk_token] expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] input_ids = tokenizer.convert_tokens_to_ids(input_tokens) self.assertListEqual(input_ids, expected_ids) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b") ids_1 = tokenizer.encode("ありがとう。", add_special_tokens=False) ids_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1) encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2) assert encoded_sentence == ids_1 assert encoded_pair == ids_1 + ids_2 @unittest.skip def test_conversion_reversible(self): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass @unittest.skip(reason="tokenizer has no padding token") def test_padding_different_model_input_name(self): pass
transformers/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py/0
{ "file_path": "transformers/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 2414 }
575
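The tokenizer test above builds its fixtures on the fly: a newline-separated vocab file plus an emoji-mapping JSON written into a temporary directory, so the tokenizer can be constructed without downloading anything. Here is a stand-alone sketch of that fixture pattern using only the standard library; the file names and the toy token-to-id lookup are illustrative and do not reproduce the real `GPTNeoXJapaneseTokenizer` behaviour.

```python
import json
import tempfile
from pathlib import Path

vocab_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "<unk>", "<|emoji1|>"]
emoji_map = {"emoji": {"😀": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "😀"}}

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = Path(tmpdir) / "vocab.txt"    # illustrative name
    emoji_file = Path(tmpdir) / "emoji.json"   # illustrative name
    # One token per line, mirroring how setUpClass writes its vocab fixture.
    vocab_file.write_text("".join(t + "\n" for t in vocab_tokens), encoding="utf-8")
    emoji_file.write_text(json.dumps(emoji_map, ensure_ascii=False), encoding="utf-8")

    # Reload the fixtures and do a toy token -> id lookup.
    token_to_id = {t: i for i, t in enumerate(vocab_file.read_text(encoding="utf-8").splitlines())}
    emoji = json.loads(emoji_file.read_text(encoding="utf-8"))
    assert token_to_id["こん"] == 0
    assert emoji["emoji"]["😀"] == "<|emoji1|>"
    print(token_to_id)
```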
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torchvision_available, is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import ( AutoProcessor, BertTokenizerFast, GPT2Tokenizer, InstructBlipVideoProcessor, PreTrainedTokenizerFast, ) if is_torchvision_available(): from transformers import InstructBlipVideoVideoProcessor @require_vision @require_torch class InstructBlipVideoProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = InstructBlipVideoProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() video_processor = InstructBlipVideoVideoProcessor() tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model") qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert") processor = InstructBlipVideoProcessor(video_processor, tokenizer, qformer_tokenizer) processor.save_pretrained(cls.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_qformer_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer def prepare_processor_dict(self): return {"num_query_tokens": 1} def get_video_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).video_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_save_load_pretrained_additional_features(self): processor = InstructBlipVideoProcessor( tokenizer=self.get_tokenizer(), video_processor=self.get_video_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), ) with tempfile.TemporaryDirectory() as tmpdir: processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") video_processor_add_kwargs = self.get_video_processor(do_normalize=False, padding_value=1.0) processor = InstructBlipVideoProcessor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.video_processor.to_json_string(), video_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.video_processor, InstructBlipVideoVideoProcessor) self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast) def test_video_processor(self): video_processor = self.get_video_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = InstructBlipVideoProcessor( tokenizer=tokenizer, video_processor=video_processor, 
qformer_tokenizer=qformer_tokenizer, **processor_kwargs, ) image_input = self.prepare_image_inputs() input_feat_extract = video_processor(image_input, return_tensors="pt") input_processor = processor(images=image_input, return_tensors="pt") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): video_processor = self.get_video_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = InstructBlipVideoProcessor( tokenizer=tokenizer, video_processor=video_processor, qformer_tokenizer=qformer_tokenizer, **processor_kwargs, ) input_str = ["lower newer"] encoded_processor = processor(text=input_str) encoded_tokens = tokenizer(input_str, return_token_type_ids=False) encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False) for key in encoded_tokens: self.assertListEqual(encoded_tokens[key], encoded_processor[key]) for key in encoded_tokens_qformer: self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key]) def test_processor(self): video_processor = self.get_video_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = InstructBlipVideoProcessor( tokenizer=tokenizer, video_processor=video_processor, qformer_tokenizer=qformer_tokenizer, **processor_kwargs, ) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["qformer_input_ids", "qformer_attention_mask", "input_ids", "attention_mask", "pixel_values"], ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): video_processor = self.get_video_processor() tokenizer = self.get_tokenizer() qformer_tokenizer = self.get_qformer_tokenizer() processor_kwargs = self.prepare_processor_dict() processor = InstructBlipVideoProcessor( tokenizer=tokenizer, video_processor=video_processor, qformer_tokenizer=qformer_tokenizer, **processor_kwargs, ) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/instructblipvideo/test_processing_instructblipvideo.py/0
{ "file_path": "transformers/tests/models/instructblipvideo/test_processing_instructblipvideo.py", "repo_id": "transformers", "token_count": 2808 }
576
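`test_save_load_pretrained_additional_features` follows a pattern that recurs across these processor tests: save a component, reload it with extra kwargs, and assert the overrides took effect. Below is a toy, self-contained version of that round-trip pattern with a JSON-backed stand-in component; `ToyComponent` is invented for illustration and is not part of transformers.

```python
import json
import tempfile
import unittest
from pathlib import Path


class ToyComponent:
    def __init__(self, **kwargs):
        self.config = {"do_normalize": True, "padding_value": 0.0, **kwargs}

    def save_pretrained(self, directory):
        (Path(directory) / "config.json").write_text(json.dumps(self.config))

    @classmethod
    def from_pretrained(cls, directory, **overrides):
        saved = json.loads((Path(directory) / "config.json").read_text())
        saved.update(overrides)  # kwargs passed at load time win over saved values
        return cls(**saved)


class ToyRoundTripTest(unittest.TestCase):
    def test_additional_features_survive_reload(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            ToyComponent().save_pretrained(tmpdir)
            reloaded = ToyComponent.from_pretrained(tmpdir, do_normalize=False, padding_value=1.0)
            # Same idea as the processor test: overridden kwargs are applied on reload.
            self.assertFalse(reloaded.config["do_normalize"])
            self.assertEqual(reloaded.config["padding_value"], 1.0)


if __name__ == "__main__":
    unittest.main()
```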
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import unittest from tempfile import TemporaryDirectory import numpy as np import pytest import requests from transformers.models.auto.processing_auto import processor_class_from_name from transformers.testing_utils import ( get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, require_vision, ) from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, CLIPImageProcessor, Kosmos2Processor, PreTrainedTokenizerFast, XLMRobertaTokenizer, XLMRobertaTokenizerFast, ) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers @require_vision class Kosmos2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Kosmos2Processor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = CLIPImageProcessor(do_center_crop=False) # We have a SentencePiece fixture for testing slow_tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB) fast_tokenizer = XLMRobertaTokenizerFast(__slow_tokenizer=slow_tokenizer) processor = Kosmos2Processor(image_processor, fast_tokenizer) processor.save_pretrained(cls.tmpdirname) # We override this method to take the fast tokenizer by default def get_component(self, attribute, **kwargs): assert attribute in self.processor_class.attributes component_class_name = getattr(self.processor_class, f"{attribute}_class") if isinstance(component_class_name, tuple): if attribute == "image_processor": component_class_name = component_class_name[0] else: component_class_name = component_class_name[-1] component_class = processor_class_from_name(component_class_name) component = component_class.from_pretrained(self.tmpdirname, **kwargs) # noqa if attribute == "tokenizer" and not component.pad_token: component.pad_token = "[TEST_PAD]" return component def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_image_procesor_load_save_reload(self): # make sure load from Hub repo. 
-> save -> reload locally work image_processor = CLIPImageProcessor.from_pretrained("microsoft/kosmos-2-patch14-224") with TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = CLIPImageProcessor.from_pretrained(tmp_dir) assert image_processor.to_dict() == reloaded_image_processor.to_dict() assert image_processor.to_json_string() == reloaded_image_processor.to_json_string() def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = Kosmos2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = Kosmos2Processor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, CLIPImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_processor = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_processor: self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" encoded_processor = processor(text=input_str, add_eos_token=True) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) input_str = "This is a test" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual( list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask", "image_embeds_position_mask"] ) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = Kosmos2Processor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) @require_torch def test_full_processor(self): url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg" processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224") # test with different input formats. 
# fmt: off texts = [ # no phrase "<grounding> Two puppies sit in a field of grass.", # 1 phrase "<grounding> <phrase> Two puppies </phrase> sit in a field of grass.", # 2 phrases "<grounding> <phrase> Two puppies </phrase> sit in a field of <phrase> grass </phrase>.", # 2 phrases: bboxes already specified for the 1st phrase "<grounding> <phrase> Two puppies </phrase> <object> <patch_index_0079> <patch_index_1016> </delimiter_of_multi_objects/> <patch_index_0135> <patch_index_1008> </object> sit in a field of <phrase> grass </phrase>.", ] # fmt: on image = Image.open(requests.get(url, stream=True).raw) # To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed image_path = os.path.join(self.tmpdirname, "image.jpg") image.save(image_path) image = Image.open(image_path) # fmt: off bboxes = [ [None, []], [[None], [[]], [(79, 1016)], [[(79, 1016)]], [[(79, 1016), (135, 1008)]]], [[[(79, 1016), (135, 1008)], None], [[(79, 1016), (135, 1008)], []], [[(79, 1016), (135, 1008)], (480, 1023)], [[(79, 1016), (135, 1008)], [(480, 1023)]]], [[None, [(480, 1023)]]], ] # fmt: on batch_image = [image] * 4 batch_text = [texts[0], texts[1], texts[1], texts[2]] batch_bboxes = [ None, # no phrase [[]], # 1 phrase: no bbox [(79, 1016)], # 1 phrase: 1 bbox [[(79, 1016), (135, 1008)], (480, 1023)], # 2 phrase: 2 bboxes + 1 bbox ] # fmt: off expected_input_ids = [ [0, 64012, 1264, 17772, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 4464, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 106, 4, 2], [0, 64012, 64007, 1264, 17772, 64008, 64009, 64092, 65029, 64011, 64148, 65021, 64010, 1357, 12, 10, 770, 9, 64007, 4464, 64008, 64009, 64493, 65036, 64010, 106, 4, 2], ] # fmt: on EXPECTED_PIXEL_VALUES_1 = np.array( [ [ [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6535852551460266, -0.6389868259429932, -0.6243883967399597], [-0.6243883967399597, -0.6243883967399597, -0.5951915383338928], ], [ [-0.20629698038101196, -0.19128920137882233, -0.19128920137882233], [-0.20629698038101196, -0.19128920137882233, -0.17628143727779388], [-0.2213047444820404, -0.20629698038101196, -0.16127367317676544], ], [ [-0.5843556523323059, -0.5701355338096619, -0.5701355338096619], [-0.5843556523323059, -0.5701355338096619, -0.5559154152870178], [-0.5843556523323059, -0.5559154152870178, -0.5416953563690186], ], ] ) EXPECTED_PIXEL_VALUES_2 = np.array( [ [ [-0.4346088469028473, -0.47840413451194763, -0.7849710583686829], [-0.5221993923187256, -0.5076009631156921, -0.755774199962616], [-0.5221993923187256, -0.5076009631156921, -0.7411757707595825], ], [ [-0.2813358008861542, -0.2963435649871826, -0.431413471698761], [-0.26632803678512573, -0.2963435649871826, -0.4764367938041687], [-0.2213047444820404, -0.2813358008861542, -0.49144455790519714], ], [ [-0.5701355338096619, -0.641235888004303, -0.7549964189529419], [-0.5843556523323059, -0.641235888004303, -0.7834365367889404], [-0.5559154152870178, -0.641235888004303, -0.7834365367889404], ], ] ) def check(texts, bboxes, expected_input_ids): outputs = processor(images=None, text=texts, bboxes=bboxes, add_eos_token=True) self.assertListEqual(outputs.input_ids, expected_input_ids) 
# no phrase check(texts[0], bboxes[0][0], expected_input_ids[0]) # no phrase check(texts[0], bboxes[0][1], expected_input_ids[0]) # 1 phrase: no bbox check(texts[1], bboxes[1][0], expected_input_ids[1]) # 1 phrase: no bbox check(texts[1], bboxes[1][1], expected_input_ids[1]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][2], expected_input_ids[2]) # 1 phrase: 1 bbox check(texts[1], bboxes[1][3], expected_input_ids[2]) # 1 phrase: 2 bboxes check(texts[1], bboxes[1][4], expected_input_ids[3]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[1], bboxes=[[None]]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][0], expected_input_ids[4]) # 2 phrase: 2 bboxes + no bbox check(texts[2], bboxes[2][1], expected_input_ids[4]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][2], expected_input_ids[5]) # 2 phrase: 2 bboxes + 1 bbox check(texts[2], bboxes[2][3], expected_input_ids[5]) # 2 phrase: no box (as already specified in the text) + 1 bbox check(texts[3], bboxes[3][0], expected_input_ids[5]) # could not contain `[None]` with pytest.raises(ValueError): _ = processor.preprocess_examples(images=None, texts=texts[2], bboxes=[[(79, 1016), (135, 1008)], [None]]) # test batch outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, add_eos_token=True, ) self.assertListEqual( outputs.input_ids, [expected_input_ids[0], expected_input_ids[1], expected_input_ids[2], expected_input_ids[5]], ) # test batch with padding (without `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask[-1], [1] * len(expected_input_ids[5])) # test batch with padding (with `return_tensors`) outputs = processor( images=None, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the right self.assertListEqual( outputs.input_ids.numpy().tolist()[0], expected_input_ids[0] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) self.assertListEqual( outputs.attention_mask.numpy().tolist()[0], [1] * len(expected_input_ids[0]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), ) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], expected_input_ids[5]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], [1] * len(expected_input_ids[5])) # test with image num_image_tokens = 64 outputs = processor(images=image, text=texts[0], bboxes=None, add_eos_token=True) self.assertTupleEqual(outputs.pixel_values[0].shape, (3, 224, 224)) self.assertListEqual( outputs.input_ids, [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], ) self.assertListEqual( outputs.image_embeds_position_mask, [0] * 2 + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[0]) - 1), ) np.testing.assert_allclose(outputs.pixel_values[0][:3, :3, :3], EXPECTED_PIXEL_VALUES_1, atol=1e-9) np.testing.assert_allclose(outputs.pixel_values[0][:3, -3:, -3:], EXPECTED_PIXEL_VALUES_2, 
atol=1e-9) # test with image in batch (right padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) self.assertTupleEqual(outputs.pixel_values.shape, (4, 3, 224, 224)) np.testing.assert_allclose( outputs.pixel_values[:, :3, :3, :3].numpy(), [EXPECTED_PIXEL_VALUES_1] * len(batch_image), atol=1e-9 ) np.testing.assert_allclose( outputs.pixel_values[:, :3, -3:, -3:].numpy(), [EXPECTED_PIXEL_VALUES_2] * len(batch_image), atol=1e-9 ) # padding on the right: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH_RIGHT_PADDING = [ [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:] + [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH_RIGHT_PADDING = [ [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]) + [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH_RIGHT_PADDING[0]) self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH_RIGHT_PADDING[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH_RIGHT_PADDING[-1]) self.assertListEqual( outputs.image_embeds_position_mask.numpy().tolist(), [[0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1)] * len(batch_image), ) processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224", padding_side="left") # test with image in batch (left padding) outputs = processor( images=batch_image, text=batch_text, bboxes=batch_bboxes, return_tensors="pt", padding=True, add_eos_token=True, ) # padding on the left: the `[1:]` below is because the part for `BOS` is already added in the beginning of each (dynamically computed) expected value # noqa # fmt: off EXPECTED_IDS_BATCH = [ [1] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[0][1:], [0, 64003] + list(range(4, 4 + num_image_tokens)) + [64004] + expected_input_ids[5][1:], ] EXPECTED_MASK_BATCH =[ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [1, 1] + [1] * num_image_tokens + [1] + [1] * len(expected_input_ids[0][1:]), [1] * (2 + num_image_tokens + len(expected_input_ids[5])), ] EXPECTED_IMG_POS_MASK_BATCH = [ [0] * (len(expected_input_ids[5]) - len(expected_input_ids[0])) + [0, 0] + [1] * num_image_tokens + [0] + [0] * len(expected_input_ids[0][1:]), [0, 0] + [1] * num_image_tokens + [0] + [0] * (len(expected_input_ids[5]) - 1), ] # fmt: on self.assertListEqual(outputs.input_ids.numpy().tolist()[0], EXPECTED_IDS_BATCH[0]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[0], EXPECTED_MASK_BATCH[0]) self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[0], EXPECTED_IMG_POS_MASK_BATCH[0]) # no padding for the longest sequence self.assertListEqual(outputs.input_ids.numpy().tolist()[-1], EXPECTED_IDS_BATCH[-1]) self.assertListEqual(outputs.attention_mask.numpy().tolist()[-1], EXPECTED_MASK_BATCH[-1]) 
self.assertListEqual(outputs.image_embeds_position_mask.numpy().tolist()[-1], EXPECTED_IMG_POS_MASK_BATCH[-1]) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_vision @require_torch def test_kwargs_overrides_default_tokenizer_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=117) processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", max_length=112, padding="max_length", ) self.assertEqual(len(inputs["input_ids"][0]), 112) # Rewrite to test only image_processor kwargs @require_torch @require_vision def test_structured_kwargs_nested(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.skip_processor_without_typed_kwargs(processor) self.assertEqual(inputs["pixel_values"].shape[2], 214) # Rewrite to test only image_processor kwargs @require_torch @require_vision def test_structured_kwargs_nested_from_dict(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() image_input = self.prepare_image_inputs() # Define the kwargs for each modality all_kwargs = { "common_kwargs": {"return_tensors": "pt"}, "images_kwargs": {"size": {"height": 214, "width": 214}}, } inputs = processor(text=input_str, images=image_input, **all_kwargs) self.assertEqual(inputs["pixel_values"].shape[2], 214) # Rewrite as Kosmos-2 supports custom padding only when image is None. 
@require_vision @require_torch def test_tokenizer_defaults_preserved_by_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor(text=input_str, images=image_input, return_tensors="pt") self.assertEqual(len(inputs["input_ids"][0]), 117) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_torch @require_vision def test_unstructured_kwargs(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs() # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", padding="max_length", max_length=76, ) self.assertEqual(len(inputs["input_ids"][0]), 76) # Rewrite as Kosmos-2 supports custom padding only when image is None. @require_torch @require_vision def test_unstructured_kwargs_batched(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = self.prepare_text_inputs(batch_size=2) # set image input to None image_input = None inputs = processor( text=input_str, images=image_input, return_tensors="pt", size={"height": 214, "width": 214}, padding="longest", max_length=76, ) self.assertEqual(len(inputs["input_ids"][0]), 10)
transformers/tests/models/kosmos2/test_processing_kosmos2.py/0
{ "file_path": "transformers/tests/models/kosmos2/test_processing_kosmos2.py", "repo_id": "transformers", "token_count": 12190 }
577
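As a quick orientation for the processor calls exercised above, here is a minimal usage sketch of `Kosmos2Processor` with a single grounded phrase and one bounding box. The checkpoint name, image URL and bbox values are taken from the test itself; the sketch assumes network access and is illustrative, not part of the test suite.

```python
import requests
from PIL import Image

from transformers import Kosmos2Processor

processor = Kosmos2Processor.from_pretrained("microsoft/kosmos-2-patch14-224")

url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/two_dogs.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One grounded phrase with a single bounding box, given as a (start, end) patch-index pair
# exactly as in the `bboxes` fixtures of the test above.
text = "<grounding> <phrase> Two puppies </phrase> sit in a field of grass."
bboxes = [(79, 1016)]

inputs = processor(images=image, text=text, bboxes=bboxes, return_tensors="pt")

# The keys mirror those asserted in `test_processor`:
# pixel_values, input_ids, attention_mask, image_embeds_position_mask.
print(sorted(inputs.keys()))
```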
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from tests.models.superglue.test_image_processing_superglue import ( SuperGlueImageProcessingTest, SuperGlueImageProcessingTester, ) from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import numpy as np import torch from transformers.models.lightglue.modeling_lightglue import LightGlueKeypointMatchingOutput if is_vision_available(): from transformers import LightGlueImageProcessor def random_array(size): return np.random.randint(255, size=size) def random_tensor(size): return torch.rand(size) class LightGlueImageProcessingTester(SuperGlueImageProcessingTester): """Tester for LightGlueImageProcessor""" def __init__( self, parent, batch_size=6, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_grayscale=True, ): super().__init__( parent, batch_size, num_channels, image_size, min_resolution, max_resolution, do_resize, size, do_grayscale ) def prepare_keypoint_matching_output(self, pixel_values): """Prepare a fake output for the keypoint matching model with random matches between 50 keypoints per image.""" max_number_keypoints = 50 batch_size = len(pixel_values) mask = torch.zeros((batch_size, 2, max_number_keypoints), dtype=torch.int) keypoints = torch.zeros((batch_size, 2, max_number_keypoints, 2)) matches = torch.full((batch_size, 2, max_number_keypoints), -1, dtype=torch.int) scores = torch.zeros((batch_size, 2, max_number_keypoints)) prune = torch.zeros((batch_size, 2, max_number_keypoints), dtype=torch.int) for i in range(batch_size): random_number_keypoints0 = np.random.randint(10, max_number_keypoints) random_number_keypoints1 = np.random.randint(10, max_number_keypoints) random_number_matches = np.random.randint(5, min(random_number_keypoints0, random_number_keypoints1)) mask[i, 0, :random_number_keypoints0] = 1 mask[i, 1, :random_number_keypoints1] = 1 keypoints[i, 0, :random_number_keypoints0] = torch.rand((random_number_keypoints0, 2)) keypoints[i, 1, :random_number_keypoints1] = torch.rand((random_number_keypoints1, 2)) random_matches_indices0 = torch.randperm(random_number_keypoints1, dtype=torch.int)[:random_number_matches] random_matches_indices1 = torch.randperm(random_number_keypoints0, dtype=torch.int)[:random_number_matches] matches[i, 0, random_matches_indices1] = random_matches_indices0 matches[i, 1, random_matches_indices0] = random_matches_indices1 scores[i, 0, random_matches_indices1] = torch.rand((random_number_matches,)) scores[i, 1, random_matches_indices0] = torch.rand((random_number_matches,)) return LightGlueKeypointMatchingOutput( mask=mask, keypoints=keypoints, matches=matches, matching_scores=scores, prune=prune ) @require_torch @require_vision class LightGlueImageProcessingTest(SuperGlueImageProcessingTest, unittest.TestCase): image_processing_class = LightGlueImageProcessor if 
is_vision_available() else None def setUp(self) -> None: super().setUp() self.image_processor_tester = LightGlueImageProcessingTester(self)
transformers/tests/models/lightglue/test_image_processing_lightglue.py/0
{ "file_path": "transformers/tests/models/lightglue/test_image_processing_lightglue.py", "repo_id": "transformers", "token_count": 1595 }
578
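To make the setup above easier to follow, here is a small sketch of the image processor that the tester configures, applied to one image pair. The pair-of-images input format and the 480x640 resize target are assumptions carried over from the SuperGlue processor this test class reuses.

```python
import numpy as np

from transformers import LightGlueImageProcessor

# Configuration mirrors the tester defaults: resize to a fixed size and convert to grayscale.
processor = LightGlueImageProcessor(
    do_resize=True, size={"height": 480, "width": 640}, do_grayscale=True
)

# Two random uint8 images standing in for a keypoint-matching pair.
image0 = np.random.randint(0, 255, size=(200, 300, 3), dtype=np.uint8)
image1 = np.random.randint(0, 255, size=(220, 310, 3), dtype=np.uint8)

# Keypoint matching works on image pairs, so inputs are passed as a list of pairs.
inputs = processor(images=[[image0, image1]], return_tensors="pt")
print(inputs["pixel_values"].shape)  # batch of pairs: (1, 2, num_channels, 480, 640)
```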
# Copyright 2021, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Marian model.""" import tempfile import unittest from transformers import MarianConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_torch_fp16, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoConfig, AutoModelWithLMHead, AutoTokenizer, MarianModel, MarianMTModel, TranslationPipeline, ) from transformers.models.marian.modeling_marian import ( MarianDecoder, MarianEncoder, MarianForCausalLM, shift_tokens_right, ) def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class MarianModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=100, eos_token_id=2, pad_token_id=1, bos_token_id=0, decoder_start_token_id=3, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = 
decoder_start_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return MarianConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = MarianModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] head_mask = inputs_dict["head_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = MarianModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = MarianEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = MarianDecoder.from_pretrained(tmpdirname).to(torch_device) 
last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MarianModel, MarianMTModel) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": MarianModel, "summarization": MarianMTModel, "text-generation": MarianForCausalLM, "text2text-generation": MarianMTModel, "translation": MarianMTModel, } if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = MarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MarianMTModel(config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_share_encoder_decoder_embeddings(self): config, input_dict = self.model_tester.prepare_config_and_inputs() # check if embeddings are shared by default for model_class in self.all_model_classes: model = model_class(config) self.assertIs(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIs(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if embeddings are not shared when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) self.assertIsNot(model.get_encoder().embed_tokens, model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) # check if a model with shared embeddings can be saved and loaded with share_encoder_decoder_embeddings = False config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, share_encoder_decoder_embeddings=False) self.assertIsNot(model.get_encoder().embed_tokens, 
model.get_decoder().embed_tokens) self.assertIsNot(model.get_encoder().embed_tokens.weight, model.get_decoder().embed_tokens.weight) def test_resize_decoder_token_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs() # check if resize_decoder_token_embeddings raises an error when embeddings are shared for model_class in self.all_model_classes: model = model_class(config) with self.assertRaises(ValueError): model.resize_decoder_token_embeddings(config.vocab_size + 1) # check if decoder embeddings are resized when config.share_encoder_decoder_embeddings = False config.share_encoder_decoder_embeddings = False for model_class in self.all_model_classes: model = model_class(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.get_decoder().embed_tokens.weight.shape, (config.vocab_size + 1, config.d_model)) # check if lm_head is also resized config, _ = self.model_tester.prepare_config_and_inputs() config.share_encoder_decoder_embeddings = False model = MarianMTModel(config) model.resize_decoder_token_embeddings(config.vocab_size + 1) self.assertEqual(model.lm_head.weight.shape, (config.vocab_size + 1, config.d_model)) @unittest.skip def test_tie_word_embeddings_decoder(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." 
else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) @require_torch @require_sentencepiece @require_tokenizers class MarianIntegrationTest(unittest.TestCase): src = "en" tgt = "de" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", "Tom asked his teacher for advice.", "That's how I would do it.", "Tom really admired Mary's courage.", "Turn around and close your eyes.", ] expected_text = [ "Ich bin ein kleiner Frosch.", "Jetzt kann ich die 100 Wörter des Deutschen vergessen, die ich kenne.", "Tom bat seinen Lehrer um Rat.", "So würde ich das machen.", "Tom bewunderte Marias Mut wirklich.", "Drehen Sie sich um und schließen Sie die Augen.", ] # ^^ actual C++ output differs slightly: (1) des Deutschen removed, (2) ""-> "O", (3) tun -> machen @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self): return AutoTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): model: MarianMTModel = AutoModelWithLMHead.from_pretrained(self.model_name).to(torch_device) c = model.config self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]]) self.assertEqual(c.max_length, 512) self.assertEqual(c.decoder_start_token_id, c.pad_token_id) if torch_device == "cuda": return model.half() else: return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, padding=True, return_tensors="pt", **tokenizer_kwargs).to( torch_device ) self.assertEqual(self.model.device, model_inputs.input_ids.device) generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128, renormalize_logits=True, # Marian should always renormalize its logits. 
See #25459 ) generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return generated_words @require_sentencepiece @require_tokenizers class TestMarian_EN_DE_More(MarianIntegrationTest): @slow def test_forward(self): src, tgt = ["I am a small frog"], ["Ich bin ein kleiner Frosch."] expected_ids = [38, 121, 14, 697, 38848, 0] model_inputs = self.tokenizer(src, text_target=tgt, return_tensors="pt").to(torch_device) self.assertListEqual(expected_ids, model_inputs.input_ids[0].tolist()) desired_keys = { "input_ids", "attention_mask", "labels", } self.assertSetEqual(desired_keys, set(model_inputs.keys())) model_inputs["decoder_input_ids"] = shift_tokens_right( model_inputs.labels, self.tokenizer.pad_token_id, self.model.config.decoder_start_token_id ) model_inputs["return_dict"] = True model_inputs["use_cache"] = False with torch.no_grad(): outputs = self.model(**model_inputs) max_indices = outputs.logits.argmax(-1) self.tokenizer.batch_decode(max_indices) def test_unk_support(self): t = self.tokenizer ids = t(["||"], return_tensors="pt").to(torch_device).input_ids[0].tolist() expected = [t.unk_token_id, t.unk_token_id, t.eos_token_id] self.assertEqual(expected, ids) def test_pad_not_split(self): input_ids_w_pad = self.tokenizer(["I am a small frog <pad>"], return_tensors="pt").input_ids[0].tolist() expected_w_pad = [38, 121, 14, 697, 38848, self.tokenizer.pad_token_id, 0] # pad self.assertListEqual(expected_w_pad, input_ids_w_pad) @slow def test_batch_generation_en_de(self): self._assert_generated_batch_equal_expected() def test_auto_config(self): config = AutoConfig.from_pretrained(self.model_name) self.assertIsInstance(config, MarianConfig) @require_sentencepiece @require_tokenizers class TestMarian_EN_FR(MarianIntegrationTest): src = "en" tgt = "fr" src_text = [ "I am a small frog.", "Now I can forget the 100 words of german that I know.", ] expected_text = [ "Je suis une petite grenouille.", "Maintenant, je peux oublier les 100 mots d'allemand que je connais.", ] @slow def test_batch_generation_en_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_FR_EN(MarianIntegrationTest): src = "fr" tgt = "en" src_text = [ "Donnez moi le micro.", "Tom et Mary étaient assis à une table.", # Accents ] expected_text = [ "Give me the microphone.", "Tom and Mary were sitting at a table.", ] @slow def test_batch_generation_fr_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_RU_FR(MarianIntegrationTest): src = "ru" tgt = "fr" src_text = ["Он показал мне рукопись своей новой пьесы."] expected_text = ["Il m'a montré le manuscrit de sa nouvelle pièce."] @slow def test_batch_generation_ru_fr(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_MT_EN(MarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks without adjust_logits_generation overwritten""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_zh(MarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_eng_zho(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers class TestMarian_en_ROMANCE(MarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow @require_torch def test_pipeline(self): pipeline = TranslationPipeline(self.model, self.tokenizer, framework="pt", device=torch_device) output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output]) @require_sentencepiece @require_tokenizers class TestMarian_FI_EN_V2(MarianIntegrationTest): src = "fi" tgt = "en" src_text = [ "minä tykkään kirjojen lukemisesta", "Pidän jalkapallon katsomisesta", ] expected_text = ["I like to read books", "I like watching football"] @classmethod def setUpClass(cls) -> None: cls.model_name = "hf-internal-testing/test-opus-tatoeba-fi-en-v2" return cls @slow def test_batch_generation_fi_en(self): self._assert_generated_batch_equal_expected() class MarianStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=2, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=100, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) 
attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = MarianConfig( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, num_hidden_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = MarianDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = MarianDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model( next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True )["last_hidden_state"] # select random slice 
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class MarianStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (MarianDecoder, MarianForCausalLM) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = MarianStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) @unittest.skip(reason="Decoder cannot keep gradients") def test_retain_grad_hidden_states_attentions(self): return @unittest.skip(reason="Decoder cannot keep gradients") def test_flex_attention_with_grads(): return
transformers/tests/models/marian/test_modeling_marian.py/0
{ "file_path": "transformers/tests/models/marian/test_modeling_marian.py", "repo_id": "transformers", "token_count": 14106 }
579
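For reference, the generation path that `MarianIntegrationTest` drives can be reproduced outside the test harness roughly as follows. The checkpoint name, beam settings and the `renormalize_logits` flag are taken from the test; the decoded output shown in the comment is the test's own expected string.

```python
import torch

from transformers import AutoTokenizer, MarianMTModel

model_name = "Helsinki-NLP/opus-mt-en-de"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name).eval()

batch = tokenizer(["I am a small frog."], padding=True, return_tensors="pt")
with torch.no_grad():
    generated_ids = model.generate(
        batch.input_ids,
        attention_mask=batch.attention_mask,
        num_beams=2,
        max_length=128,
        renormalize_logits=True,  # Marian should always renormalize its logits, see #25459
    )

print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))
# Per the test's expected_text: ['Ich bin ein kleiner Frosch.']
```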
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right EN_CODE = 250004 RO_CODE = 250020 @require_sentencepiece @require_tokenizers class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/mbart-large-en-ro" tokenizer_class = MBartTokenizer rust_tokenizer_class = MBartTokenizerFast test_rust_tokenizer = True test_sentencepiece = True @classmethod def setUpClass(cls): super().setUpClass() # We have a SentencePiece fixture for testing tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(cls.tmpdirname) def test_full_tokenizer(self): tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) # overwrite from test_tokenization_common to speed up test def test_save_pretrained(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = 
self.get_rust_tokenizer(pretrained_name, **kwargs) tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs) tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=True tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it save with the same files self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) # Save tokenizer rust, legacy_format=False tmpdirname2 = tempfile.mkdtemp() tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False) tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2) tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(tokenizer_rp, key)) shutil.rmtree(tmpdirname2) @unittest.skip(reason="Need to fix this after #26538") def test_training_new_tokenizer(self): pass @require_torch @require_sentencepiece @require_tokenizers class MBartEnroIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/mbart-large-en-ro" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE] @classmethod def setUpClass(cls): cls.tokenizer: MBartTokenizer = 
MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" ) cls.pad_token_id = 1 return cls def check_language_codes(self): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020) def test_enro_tokenizer_batch_encode_plus(self): ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, ids) def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], EN_CODE) self.assertEqual(len(ids), desired_max_length) def test_mask_token(self): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001]) def test_special_tokens_unaffacted_by_save_load(self): tmpdirname = tempfile.mkdtemp() original_special_tokens = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(tmpdirname) new_tok = MBartTokenizer.from_pretrained(tmpdirname) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens) @require_torch def test_batch_fairseq_parity(self): batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt") batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 14), batch.input_ids.shape) self.assertEqual((2, 14), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE]) def test_seq2seq_max_length(self): batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt") targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) 
self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, }, )
transformers/tests/models/mbart/test_tokenization_mbart.py/0
{ "file_path": "transformers/tests/models/mbart/test_tokenization_mbart.py", "repo_id": "transformers", "token_count": 6638 }
580
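The MBart tokenizer tests above revolve around the language-code convention: source ids end with [eos, src_lang_code], labels end with [eos, tgt_lang_code], and decoder inputs are the labels shifted right. Below is a minimal sketch of that flow, reusing only the checkpoint and calls that already appear in the tests (running it requires downloading facebook/mbart-large-en-ro).

# Minimal sketch of the en_XX -> ro_RO convention exercised by the tests above.
from transformers import MBartTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)
# Source ids end with [eos, en_XX]; labels end with [eos, ro_RO].
# Shifting the labels right moves the target language code to the front,
# which is the decoder-input layout the generation tests rely on.
batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)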
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MiniMax model.""" import unittest import pytest from transformers import MiniMaxConfig, is_torch_available from transformers.cache_utils import Cache from transformers.testing_utils import ( Expectations, require_flash_attn, require_torch, require_torch_accelerator, require_torch_gpu, slow, torch_device, ) if is_torch_available(): import torch from transformers import ( MiniMaxForCausalLM, MiniMaxForQuestionAnswering, MiniMaxForSequenceClassification, MiniMaxForTokenClassification, MiniMaxModel, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester class MiniMaxModelTester(CausalLMModelTester): config_class = MiniMaxConfig if is_torch_available(): base_model_class = MiniMaxModel causal_lm_class = MiniMaxForCausalLM sequence_class = MiniMaxForSequenceClassification token_class = MiniMaxForTokenClassification question_answering_class = MiniMaxForQuestionAnswering def __init__(self, parent, layer_types=None, block_size=3): super().__init__(parent) self.layer_types = layer_types self.block_size = block_size @require_torch class MiniMaxModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = ( ( MiniMaxModel, MiniMaxForCausalLM, MiniMaxForSequenceClassification, MiniMaxForTokenClassification, MiniMaxForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": MiniMaxModel, "text-classification": MiniMaxForSequenceClassification, "token-classification": MiniMaxForTokenClassification, "text-generation": MiniMaxForCausalLM, "question-answering": MiniMaxForQuestionAnswering, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False model_tester_class = MiniMaxModelTester # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146 def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): return True @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest(reason="MiniMax flash attention does not support right padding") def test_load_balancing_loss(self): r""" Let's make sure we can actually compute the loss and do a backward on it. 
""" config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.num_local_experts = 8 config.output_router_logits = True input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = MiniMaxForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask) self.assertEqual(result.router_logits[0].shape, (91, config.num_local_experts)) torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2) # First, we make sure that adding padding tokens doesn't change the loss # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding) pad_length = 1000 # Add padding tokens (assume that pad_token_id=1) to input_ids padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device) padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left padded_attention_mask = padded_input_ids.ne(1).to(torch_device) padded_result = model(padded_input_ids, attention_mask=padded_attention_mask) torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4) # We make sure that the loss of including padding tokens != the loss without padding tokens # if attention_mask=None --> we don't exclude padding tokens include_padding_result = model(padded_input_ids, attention_mask=None) # This is to mimic torch.testing.assert_not_close self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item()) def _check_attentions_for_generate( self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (output_length - prompt_length)) use_cache = decoder_past_key_values is not None for generated_length, iter_attentions in enumerate(attentions): # regardless of using cache, the first forward pass will have the full prompt as input if use_cache and generated_length > 0: model_input_length = 1 else: model_input_length = prompt_length + generated_length expected_shape = ( batch_size, config.num_attention_heads, model_input_length, prompt_length + generated_length, ) for layer_idx, layer_attention in enumerate(iter_attentions): if config.layer_types[layer_idx] == "full_attention": self.assertEqual(layer_attention.shape, expected_shape) def _check_past_key_values_for_generate(self, batch_size, decoder_past_key_values, cache_length, config): self.assertIsInstance(decoder_past_key_values, (tuple, Cache)) # (batch, head, seq_length, head_features) key_value_cache_expected_shape = ( batch_size, config.num_key_value_heads, cache_length, config.hidden_size // config.num_attention_heads, ) # (batch, head, head_features, head_features) linear_cache_expected_shape = ( batch_size, config.num_attention_heads, config.hidden_size // config.num_attention_heads, config.hidden_size // config.num_attention_heads, ) for layer_idx in range(config.num_hidden_layers): if config.layer_types[layer_idx] == "full_attention": self.assertEqual(decoder_past_key_values[layer_idx][0].shape, key_value_cache_expected_shape) self.assertEqual(decoder_past_key_values[layer_idx][1].shape, key_value_cache_expected_shape) else: self.assertEqual(decoder_past_key_values[layer_idx][0].shape, 
linear_cache_expected_shape) @pytest.mark.generate def test_past_key_values_format(self, custom_all_cache_shapes=None): """ Test that the KV cache is formatted correctly. """ for model_class in self.all_generative_model_classes: config, inputs = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(torch_device) model = model.eval() if "use_cache" not in inputs: inputs["use_cache"] = True outputs = model(**inputs) past_kv = outputs["past_key_values"] batch_size, seq_length = inputs["input_ids"].shape self._check_past_key_values_for_generate(batch_size, past_kv, seq_length, config) @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_prompt_lookup_decoding_matches_greedy_search(self): pass @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_contrastive_generate_low_memory(self): pass @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_assisted_decoding_sample(self): pass @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_assisted_decoding_matches_greedy_search_0_random(self): pass @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_assisted_decoding_matches_greedy_search_1_same(self): pass @unittest.skip(reason="MiniMaxCache does not support `crop()` method") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Model needs refactor") def test_attention_outputs(self): pass @unittest.skip("MiniMax is special") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("MiniMax is special") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @require_torch @require_torch_accelerator @slow class MiniMaxIntegrationTest(unittest.TestCase): def test_small_model_logits(self): model_id = "hf-internal-testing/MiniMax-tiny" dummy_input = torch.LongTensor([[0, 1, 0], [0, 1, 0]]).to(torch_device) model = MiniMaxForCausalLM.from_pretrained( model_id, dtype=torch.bfloat16, ).to(torch_device) with torch.no_grad(): logits = model(dummy_input).logits logits = logits.float() expectations = Expectations( { (None, None): [[1.0312, -0.5156, -0.3262], [-0.1152, 0.4336, 0.2412], [1.2188, -0.5898, -0.0381]], ("cuda", 8): [[1.0312, -0.5156, -0.3203], [-0.1201, 0.4375, 0.2402], [1.2188, -0.5898, -0.0396]], } ) expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(logits[0, :3, :3], expected_slice, atol=1e-3, rtol=1e-3) torch.testing.assert_close(logits[1, :3, :3], expected_slice, atol=1e-3, rtol=1e-3) def test_small_model_generation(self): model_id = "hf-internal-testing/MiniMax-tiny" dummy_input = torch.LongTensor([[0, 1, 0], [0, 1, 0]]).to(torch_device) model = MiniMaxForCausalLM.from_pretrained( model_id, dtype=torch.bfloat16, ).to(torch_device) expected_slice = ( torch.tensor([[0, 1, 0, 933, 307, 3102, 2457, 1208], [0, 1, 0, 933, 307, 3102, 2457, 1208]]) .to(torch.int64) .to(torch_device) ) outputs = model.generate(dummy_input, max_new_tokens=5, do_sample=False) torch.testing.assert_close(outputs, expected_slice, atol=1e-3, rtol=1e-3)
transformers/tests/models/minimax/test_modeling_minimax.py/0
{ "file_path": "transformers/tests/models/minimax/test_modeling_minimax.py", "repo_id": "transformers", "token_count": 5026 }
581
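test_load_balancing_loss above checks two properties of the MoE auxiliary loss: left-padding tokens must not change it when an attention mask is supplied, and they do change it when the mask is omitted. The helper below is a simplified, hypothetical Switch-style sketch of such a loss with mask filtering; it is not the library's own load_balancing_loss_func, whose exact formula differs.

import torch
import torch.nn.functional as F

def simple_load_balancing_loss(router_logits, num_experts, top_k, attention_mask=None):
    """Hypothetical simplified auxiliary loss, for illustration only."""
    probs = router_logits.softmax(dim=-1)                    # (num_tokens, num_experts)
    _, selected = torch.topk(probs, top_k, dim=-1)           # (num_tokens, top_k)
    expert_mask = F.one_hot(selected, num_experts).float()   # (num_tokens, top_k, num_experts)
    if attention_mask is not None:
        # Drop padding tokens, which is what makes the padded and unpadded
        # losses match in the test above.
        keep = attention_mask.reshape(-1).bool()
        probs, expert_mask = probs[keep], expert_mask[keep]
    tokens_per_expert = expert_mask.mean(dim=(0, 1))         # fraction of routed tokens per expert
    router_prob_per_expert = probs.mean(dim=0)               # mean routing probability per expert
    return num_experts * torch.sum(tokens_per_expert * router_prob_per_expert)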
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OmDet-Turbo model.""" import copy import unittest from io import BytesIO import requests from transformers import OmDetTurboConfig, is_torch_available, is_vision_available from transformers.feature_extraction_utils import BatchFeature from transformers.file_utils import cached_property from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import OmDetTurboForObjectDetection if is_vision_available(): from PIL import Image from transformers import AutoProcessor class OmDetTurboModelTester: def __init__( self, parent, batch_size=6, is_training=False, num_channels=3, max_text_len=7, num_classes=3, use_timm_backbone=False, backbone=None, apply_layernorm_after_vision_backbone=False, image_size=224, text_projection_in_dim=16, text_projection_out_dim=16, class_embed_dim=16, hidden_size=8, num_hidden_layers=2, num_attention_heads=2, num_queries=20, encoder_in_channels=(16, 32, 64), encoder_dim_feedforward=32, num_projection_layers=1, decoder_n_points=4, num_feature_levels=3, ): super().__init__() self.parent = parent self.batch_size = batch_size self.is_training = is_training self.num_channels = num_channels self.max_text_len = max_text_len self.num_classes = num_classes self.use_timm_backbone = use_timm_backbone self.backbone = backbone self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone self.image_size = image_size self.text_projection_in_dim = text_projection_in_dim self.text_projection_out_dim = text_projection_out_dim self.class_embed_dim = class_embed_dim self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_queries = num_queries self.encoder_in_channels = encoder_in_channels self.encoder_dim_feedforward = encoder_dim_feedforward self.num_projection_layers = num_projection_layers self.decoder_n_points = decoder_n_points self.num_feature_levels = num_feature_levels self.encoder_seq_length_vision = self.image_size // 32 self.decoder_seq_length = self.num_queries def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_ids_tasks = ids_tensor([self.batch_size, self.max_text_len], self.num_classes) input_ids_tasks = input_ids_tasks.to(torch_device) input_ids_classes = torch.cat( [ids_tensor([self.num_classes, self.max_text_len], self.num_classes) for _ in range(self.batch_size)] ) input_ids_classes = input_ids_classes.to(torch_device) attention_mask_tasks = torch.ones_like(input_ids_tasks, device=torch_device) 
attention_mask_classes = torch.ones_like(input_ids_classes, device=torch_device) classes_structure = torch.ones(self.batch_size, dtype=torch.long, device=torch_device) * self.num_classes encoding = BatchFeature() encoding.update( { "pixel_values": pixel_values, "classes_input_ids": input_ids_classes, "classes_attention_mask": attention_mask_classes, "tasks_input_ids": input_ids_tasks, "tasks_attention_mask": attention_mask_tasks, "classes_structure": classes_structure, } ) config = self.get_config() return config, encoding def get_config(self): text_backbone = { "hidden_size": 16, "num_hidden_layers": 2, "num_attention_heads": 2, "intermediate_size": 16, "max_position_embeddings": 8, "model_type": "clip_text_model", } backbone_config = { "embed_dim": self.hidden_size, "depths": (1, 1, 1, 1), "num_heads": (1, 1, 1, 1), "window_size": 7, "image_size": self.image_size, "out_indices": (2, 3, 4), "model_type": "swin", } return OmDetTurboConfig( text_config=text_backbone, backbone_config=backbone_config, use_timm_backbone=self.use_timm_backbone, backbone=self.backbone, apply_layernorm_after_vision_backbone=self.apply_layernorm_after_vision_backbone, decoder_num_layers=self.num_hidden_layers, image_size=self.image_size, encoder_in_channels=self.encoder_in_channels, num_queries=self.num_queries, encoder_layers=self.num_hidden_layers, encoder_projection_indices=[2] * self.num_projection_layers, encoder_attention_heads=self.num_attention_heads, decoder_num_heads=self.num_attention_heads, decoder_num_points=self.decoder_n_points, num_feature_levels=self.num_feature_levels, encoder_dim_feedforward=self.encoder_dim_feedforward, task_encoder_hidden_dim=self.encoder_dim_feedforward, decoder_dim_feedforward=self.encoder_dim_feedforward, class_embed_dim=self.class_embed_dim, text_projection_in_dim=self.text_projection_in_dim, text_projection_out_dim=self.text_projection_out_dim, encoder_hidden_dim=self.hidden_size, decoder_hidden_dim=self.hidden_size, vision_features_channels=[self.hidden_size, self.hidden_size, self.hidden_size], ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_object_detection_head_model(self, config, inputs_dict): model = OmDetTurboForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.parent.assertEqual(result.decoder_coord_logits.shape, (self.batch_size, self.num_queries, 4)) self.parent.assertEqual( result.decoder_class_logits.shape, (self.batch_size, self.num_queries, self.num_classes) ) @require_torch class OmDetTurboModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OmDetTurboForObjectDetection,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False pipeline_model_mapping = ( {"zero-shot-object-detection": OmDetTurboForObjectDetection} if is_torch_available() else {} ) # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) return inputs_dict def setUp(self): self.model_tester = OmDetTurboModelTester(self) self.config_tester = ConfigTester( self, config_class=OmDetTurboConfig, has_text_modality=False, common_properties=["d_model", "encoder_attention_heads", "decoder_num_heads"], ) def test_config(self): self.config_tester.run_common_tests() def test_object_detection_head_model(self): config, 
inputs_dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_object_detection_head_model(config, inputs_dict) @unittest.skip( reason="Unsupported as classes_input_ids are classes input are flattened by the processor: https://github.com/huggingface/transformers/issues/33669" ) def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="OmDet-Turbo does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="OmDet-Turbo does not have 'input_ids' and 'attention_mask'") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="OmDet-Turbo does not have 'input_ids' and 'attention_mask'") def test_torchscript_output_hidden_states(self): pass @unittest.skip(reason="OmDet-Turbo does not have 'input_ids' and 'attention_mask'") def test_torchscript_simple(self): pass @unittest.skip(reason="OmDet-Turbo does not have 'input_ids' and 'attention_mask'") def test_torchscript_output_hidden_state(self): pass def test_resize_tokens_embeddings(self): # rewrite as OmDet-Turbo does not have "input_ids" and "decoder_input_ids" ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to `False`") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_embed_pre_resize = model.get_input_embeddings() type_model_embed_pre_resize = type(model_embed_pre_resize) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check to make sure the type of embeddings returned post resizing is same as type of input type_model_embed_post_resize = type(model_embed) self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["tasks_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that classes_input_ids are resized as well if "classes_input_ids" in 
inputs_dict: inputs_dict["classes_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertTrue(new_model_vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], new_model_vocab_size) self.assertTrue(new_model_vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # Overwrite as `init_reference_points` is not batch dependent and contains `inf` values def test_batching_equivalence(self): """ Tests that the model supports batching and that the output is nearly the same for the same input in different batch sizes. (Why "nearly the same" not "exactly the same"? 
Batching uses different matmul shapes, which often leads to different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535) """ def get_tensor_equivalence_function(batched_input): # models operating on continuous spaces have higher abs difference than LMs # instead, we can rely on cos distance for image/speech models, similar to `diffusers` if "input_ids" not in batched_input: return lambda tensor1, tensor2: ( 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=1e-38) ) return lambda tensor1, tensor2: torch.max(torch.abs(tensor1 - tensor2)) def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif isinstance(batched_object, dict): for batched_object_value, single_row_object_value in zip( batched_object.values(), single_row_object.values() ): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return elif key != "init_reference_points": # init # indexing the first element does not always work # e.g. models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}", ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." 
), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() equivalence = get_tensor_equivalence_function(batched_input) for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"): config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) if isinstance(model_batched_output, torch.Tensor): model_batched_output = {"model_output": model_batched_output} model_row_output = {"model_output": model_row_output} for key in model_batched_output: # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan` if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key: model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] if key in ("decoder_class_logits", "decoder_classes", "encoder_class_logits"): # check if all elements are close to 0, if so skip the test as the test strugles with comparing # tensors with all elements close to 0 if torch.allclose( model_batched_output[key], torch.zeros_like(model_batched_output[key]), atol=1e-6 ) and torch.allclose(model_row_output[key], torch.zeros_like(model_row_output[key]), atol=1e-6): continue recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions[-1] self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions[-1] self.assertEqual( len(attentions), self.model_tester.num_hidden_layers * self.model_tester.num_projection_layers ) # Rest of the shape seems to depend on backbone output shapes and image size self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.encoder_seq_length_vision**2, self.model_tester.encoder_seq_length_vision**2, ], ) # decoder attentions decoder_attentions = outputs.decoder_attentions[0] self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_queries + 
self.model_tester.max_text_len, self.model_tester.num_queries + self.model_tester.max_text_len, ], ) # cross attentions cross_attentions = outputs.decoder_attentions[-1] self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self_attentions = outputs.encoder_attentions[-1] self.assertEqual( len(self_attentions), self.model_tester.num_hidden_layers * self.model_tester.num_projection_layers ) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, self.model_tester.encoder_seq_length_vision**2, self.model_tester.encoder_seq_length_vision**2, ], ) # overwrite since encoder_hidden_states are 3-dim and not 2-dim def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_projection_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = self.model_tester.encoder_seq_length_vision self.assertListEqual(list(hidden_states[0].shape[-3:]), [self.model_tester.hidden_size, seq_len, seq_len]) hidden_states = outputs.decoder_hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0][0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() cross_attentions = outputs.decoder_attentions[-1][0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) 
self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if ( "embeddings" in name or ".fc" in name or "decoder.channel_projection_layers" in name or "query_position_head" in name or "decoder.encoder_vision_features" in name or "language_backbone.text_projection" in name ): continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} seems not properly initialized", ) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") return image def prepare_text(): text_labels = ["cat", "remote"] task = "Detect {}.".format(", ".join(text_labels)) return text_labels, task def prepare_img_batched(): url1 = "http://images.cocodataset.org/val2017/000000039769.jpg" url2 = "http://images.cocodataset.org/train2017/000000257813.jpg" url3 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" return [Image.open(BytesIO(requests.get(url).content)).convert("RGB") for url in [url1, url2, url3]] def prepare_text_batched(): text_labels1 = ["cat", "remote"] text_labels2 = ["boat"] text_labels3 = ["statue", "trees", "torch"] task1 = "Detect {}.".format(", ".join(text_labels1)) task2 = "Detect all the boat in the image." task3 = "Focus on the foreground, detect statue, torch and trees." 
return [text_labels1, text_labels2, text_labels3], [task1, task2, task3] @require_timm @require_vision @slow class OmDetTurboModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf") if is_vision_available() else None def test_inference_object_detection_head(self): model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf").to(torch_device) processor = self.default_processor image = prepare_img() text_labels, task = prepare_text() encoding = processor(images=image, text=text_labels, task=task, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape_coord_logits = torch.Size((1, model.config.num_queries, 4)) expected_shape_class_logits = torch.Size((1, model.config.num_queries, 2)) self.assertEqual(outputs.decoder_coord_logits.shape, expected_shape_coord_logits) self.assertEqual(outputs.decoder_class_logits.shape, expected_shape_class_logits) expected_class_logits = torch.tensor([[[0.9427, -2.5958], [0.2105, -3.4569], [-2.6364, -4.1610]]]).to( torch_device ) expected_coord_logits = torch.tensor( [[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]] ).to(torch_device) torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1) torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3) # verify grounded postprocessing results = processor.post_process_grounded_object_detection( outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7675, 0.7196, 0.5634, 0.5524]).to(torch_device) expected_slice_boxes = torch.tensor([39.8870, 70.3522, 176.7424, 118.0354]).to(torch_device) self.assertEqual(len(results["scores"]), 4) torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) expected_text_labels = ["remote", "cat", "remote", "cat"] self.assertListEqual(results["text_labels"], expected_text_labels) def test_inference_object_detection_head_fp16(self): model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf").to( torch_device, dtype=torch.float16 ) processor = self.default_processor image = prepare_img() text_labels, task = prepare_text() encoding = processor(images=image, text=text_labels, task=task, return_tensors="pt").to( torch_device, dtype=torch.float16 ) with torch.no_grad(): outputs = model(**encoding) expected_shape_coord_logits = torch.Size((1, model.config.num_queries, 4)) expected_shape_class_logits = torch.Size((1, model.config.num_queries, 2)) self.assertEqual(outputs.decoder_coord_logits.shape, expected_shape_coord_logits) self.assertEqual(outputs.decoder_class_logits.shape, expected_shape_class_logits) expected_class_logits = torch.tensor([[[0.9427, -2.5958], [0.2105, -3.4569], [-2.6364, -4.1610]]]).to( torch_device, dtype=torch.float16 ) expected_coord_logits = torch.tensor( [[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]] ).to(torch_device, dtype=torch.float16) torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1) torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3) # verify grounded postprocessing 
results = processor.post_process_grounded_object_detection( outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7675, 0.7196, 0.5634, 0.5524]).to(torch_device, dtype=torch.float16) expected_slice_boxes = torch.tensor([39.8870, 70.3522, 176.7424, 118.0354]).to( torch_device, dtype=torch.float16 ) self.assertEqual(len(results["scores"]), 4) torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-1, atol=1e-1) expected_text_labels = ["remote", "cat", "remote", "cat"] self.assertListEqual(results["text_labels"], expected_text_labels) def test_inference_object_detection_head_no_task(self): model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf").to(torch_device) processor = self.default_processor image = prepare_img() text_labels, _ = prepare_text() encoding = processor(images=image, text=text_labels, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape_coord_logits = torch.Size((1, model.config.num_queries, 4)) expected_shape_class_logits = torch.Size((1, model.config.num_queries, 2)) self.assertEqual(outputs.decoder_coord_logits.shape, expected_shape_coord_logits) self.assertEqual(outputs.decoder_class_logits.shape, expected_shape_class_logits) expected_class_logits = torch.tensor([[[0.9427, -2.5958], [0.2105, -3.4569], [-2.6364, -4.1610]]]).to( torch_device ) expected_coord_logits = torch.tensor( [[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]] ).to(torch_device) torch.testing.assert_close(outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1) torch.testing.assert_close(outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3) # verify grounded postprocessing results = processor.post_process_grounded_object_detection( outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.7675, 0.7196, 0.5634, 0.5524]).to(torch_device) expected_slice_boxes = torch.tensor([39.8870, 70.3522, 176.7424, 118.0354]).to(torch_device) self.assertEqual(len(results["scores"]), 4) torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-2, atol=1e-2) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) expected_text_labels = ["remote", "cat", "remote", "cat"] self.assertListEqual(results["text_labels"], expected_text_labels) def test_inference_object_detection_head_batched(self): torch_device = "cpu" model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf").to(torch_device) processor = self.default_processor images_batched = prepare_img_batched() text_labels_batched, tasks_batched = prepare_text_batched() encoding = processor( images=images_batched, text=text_labels_batched, task=tasks_batched, return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape_coord_logits = torch.Size((len(images_batched), model.config.num_queries, 4)) expected_shape_class_logits = torch.Size((len(images_batched), model.config.num_queries, 3)) self.assertEqual(outputs.decoder_coord_logits.shape, expected_shape_coord_logits) self.assertEqual(outputs.decoder_class_logits.shape, expected_shape_class_logits) expected_class_logits = torch.tensor( [[[0.9427, -2.5958, -7.7601]], [[-2.3408, -9.3516, -9.3516]], [[1.0740, 
-2.3315, -1.1885]]] ).to(torch_device) expected_coord_logits = torch.tensor( [[[0.2550, 0.5501, 0.4738]], [[0.2535, 0.6006, 0.0353]], [[0.3742, 0.3337, 0.0666]]] ).to(torch_device) torch.testing.assert_close( outputs.decoder_class_logits[:, :1, :3], expected_class_logits, rtol=1e-1, atol=1e-1 ) torch.testing.assert_close( outputs.decoder_coord_logits[:, :1, :3], expected_coord_logits, rtol=1e-3, atol=1e-3 ) # verify grounded postprocessing results = processor.post_process_grounded_object_detection( outputs, text_labels=text_labels_batched, target_sizes=[image.size[::-1] for image in images_batched], threshold=0.2, ) expected_scores = torch.tensor([0.7675, 0.3016, 0.7454]).to(torch_device) expected_slice_boxes = torch.tensor( [ [39.8870, 70.3522, 176.7424, 118.0354], [146.5446, 219.7132, 209.6983, 251.0456], [545.3470, 209.9055, 651.9860, 502.1882], ] ).to(torch_device) self.assertListEqual([len(result["scores"]) for result in results], [4, 4, 6]) torch.testing.assert_close( torch.stack([result["scores"][0] for result in results]), expected_scores, rtol=1e-2, atol=1e-2 ) torch.testing.assert_close( torch.stack([result["boxes"][0, :] for result in results]), expected_slice_boxes, rtol=1e-2, atol=1e-2 ) expected_text_labels = [ ["remote", "cat", "remote", "cat"], ["boat", "boat", "boat", "boat"], ["statue", "trees", "trees", "torch", "statue", "statue"], ] self.assertListEqual([result["text_labels"] for result in results], expected_text_labels) @require_torch_accelerator def test_inference_object_detection_head_equivalence_cpu_accelerator(self): processor = self.default_processor image = prepare_img() text_labels, task = prepare_text() encoding = processor(images=image, text=text_labels, task=task, return_tensors="pt") # 1. run model on CPU model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf") with torch.no_grad(): cpu_outputs = model(**encoding) # 2. run model on accelerator model.to(torch_device) encoding = encoding.to(torch_device) with torch.no_grad(): gpu_outputs = model(**encoding) # 3. assert equivalence expected_class_logits = torch.tensor([[[0.9427, -2.5958], [0.2105, -3.4569], [-2.6364, -4.1610]]]) expected_coord_logits = torch.tensor( [[[0.2550, 0.5501, 0.4738, 0.8745], [0.7695, 0.4121, 0.4603, 0.7244], [0.7691, 0.4117, 0.4603, 0.7214]]] ) torch.testing.assert_close( cpu_outputs.decoder_class_logits[:3, :3], expected_class_logits, rtol=1e-1, atol=1e-1 ) torch.testing.assert_close( cpu_outputs.decoder_coord_logits[:3, :3], expected_coord_logits, rtol=1e-3, atol=1e-3 ) # verify grounded postprocessing results_cpu = processor.post_process_grounded_object_detection( cpu_outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]] )[0] result_gpu = processor.post_process_grounded_object_detection( gpu_outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]] )[0] torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-2, atol=1e-2) torch.testing.assert_close(results_cpu["boxes"][0, :], result_gpu["boxes"][0, :].cpu(), rtol=1e-2, atol=1e-2)
transformers/tests/models/omdet_turbo/test_modeling_omdet_turbo.py/0
{ "file_path": "transformers/tests/models/omdet_turbo/test_modeling_omdet_turbo.py", "repo_id": "transformers", "token_count": 19540 }
582
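A usage sketch of the zero-shot detection flow the OmDet-Turbo integration tests above exercise, reusing the same checkpoint, processor call, and grounded post-processing (running it downloads omlab/omdet-turbo-swin-tiny-hf and a COCO image).

import requests
import torch
from PIL import Image
from transformers import AutoProcessor, OmDetTurboForObjectDetection

processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
).convert("RGB")
text_labels = ["cat", "remote"]
task = "Detect {}.".format(", ".join(text_labels))

inputs = processor(images=image, text=text_labels, task=task, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The processor flattens the class prompts, so post-processing is given the
# per-image text labels and original (height, width) sizes back.
results = processor.post_process_grounded_object_detection(
    outputs, text_labels=[text_labels], target_sizes=[image.size[::-1]]
)[0]
for score, box, label in zip(results["scores"], results["boxes"], results["text_labels"]):
    print(f"{label}: {score:.2f} at {box.tolist()}")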
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import AutoProcessor, Owlv2ForObjectDetection, Owlv2ImageProcessor if is_torch_available(): import torch from transformers import Owlv2ImageProcessorFast class Owlv2ImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size if size is not None else {"height": 18, "width": 18} self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_convert_rgb = do_convert_rgb def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class Owlv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Owlv2ImageProcessor if is_vision_available() else None fast_image_processing_class = Owlv2ImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = Owlv2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) 
self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict( self.image_processor_dict, size={"height": 42, "width": 42} ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) @slow def test_image_processor_integration_test(self): for image_processing_class in self.image_processor_list: processor = image_processing_class() image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") pixel_values = processor(image, return_tensors="pt").pixel_values mean_value = round(pixel_values.mean().item(), 4) self.assertEqual(mean_value, 0.2353) @slow def test_image_processor_integration_test_resize(self): for use_fast in [False, True]: checkpoint = "google/owlv2-base-patch16-ensemble" processor = AutoProcessor.from_pretrained(checkpoint, use_fast=use_fast) model = Owlv2ForObjectDetection.from_pretrained(checkpoint) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") text = ["cat"] target_size = image.size[::-1] expected_boxes = torch.tensor( [ [341.66656494140625, 23.38756561279297, 642.321044921875, 371.3482971191406], [6.753320693969727, 51.96149826049805, 326.61810302734375, 473.12982177734375], ] ) # single image inputs = processor(text=[text], images=[image], return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) results = processor.post_process_object_detection(outputs, threshold=0.2, target_sizes=[target_size])[0] boxes = results["boxes"] torch.testing.assert_close(boxes, expected_boxes, atol=1e-1, rtol=1e-1) # batch of images inputs = processor(text=[text, text], images=[image, image], return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) results = processor.post_process_object_detection( outputs, threshold=0.2, target_sizes=[target_size, target_size] ) for result in results: boxes = result["boxes"] torch.testing.assert_close(boxes, expected_boxes, atol=1e-1, rtol=1e-1) @unittest.skip(reason="OWLv2 doesn't treat 4 channel PIL and numpy consistently yet") # FIXME Amy def test_call_numpy_4_channels(self): pass
transformers/tests/models/owlv2/test_image_processing_owlv2.py/0
{ "file_path": "transformers/tests/models/owlv2/test_image_processing_owlv2.py", "repo_id": "transformers", "token_count": 2907 }
583
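A usage sketch of the OWLv2 detection post-processing checked by test_image_processor_integration_test_resize above, reusing the same checkpoint and calls (the COCO fixture path is the one the test reads).

import torch
from PIL import Image
from transformers import AutoProcessor, Owlv2ForObjectDetection

checkpoint = "google/owlv2-base-patch16-ensemble"
processor = AutoProcessor.from_pretrained(checkpoint)
model = Owlv2ForObjectDetection.from_pretrained(checkpoint)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text=[["cat"]], images=[image], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes is (height, width) per image, so boxes are mapped back to the
# original resolution rather than the padded square model input.
results = processor.post_process_object_detection(
    outputs, threshold=0.2, target_sizes=[image.size[::-1]]
)[0]
print(results["boxes"])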
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.file_utils import is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from transformers import PromptDepthAnythingImageProcessor class PromptDepthAnythingImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): super().__init__() size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } def expected_output_image_shape(self, images): return self.num_channels, self.size["height"], self.size["width"] def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class PromptDepthAnythingImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = PromptDepthAnythingImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = PromptDepthAnythingImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "prompt_scale_to_meter")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, 
"width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_keep_aspect_ratio(self): size = {"height": 512, "width": 512} image_processor = PromptDepthAnythingImageProcessor(size=size, keep_aspect_ratio=True, ensure_multiple_of=32) image = np.zeros((489, 640, 3)) pixel_values = image_processor(image, return_tensors="pt").pixel_values self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672]) def test_prompt_depth_processing(self): size = {"height": 756, "width": 756} image_processor = PromptDepthAnythingImageProcessor(size=size, keep_aspect_ratio=True, ensure_multiple_of=32) image = np.zeros((756, 1008, 3)) prompt_depth = np.random.random((192, 256)) outputs = image_processor(image, prompt_depth=prompt_depth, return_tensors="pt") pixel_values = outputs.pixel_values prompt_depth_values = outputs.prompt_depth self.assertEqual(list(pixel_values.shape), [1, 3, 768, 1024]) self.assertEqual(list(prompt_depth_values.shape), [1, 1, 192, 256])
transformers/tests/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py/0
{ "file_path": "transformers/tests/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py", "repo_id": "transformers", "token_count": 2112 }
584
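test_keep_aspect_ratio above asserts that a 489x640 image processed with size 512, keep_aspect_ratio=True and ensure_multiple_of=32 comes out as 512x672. Below is a small worked sketch of that arithmetic, assuming the DPT-style rule of applying the scale closest to 1.0 to both sides and rounding each side to the nearest multiple; the helper name is ours, not the library's.

def dpt_style_output_size(height, width, target=512, multiple=32):
    def to_multiple(x):
        return int(round(x / multiple) * multiple)
    scale_h, scale_w = target / height, target / width
    # Keep the aspect ratio by applying the scale that deviates least from 1.0 to both sides.
    scale = scale_w if abs(1 - scale_w) < abs(1 - scale_h) else scale_h
    return to_multiple(scale * height), to_multiple(scale * width)

# 489x640 with target 512 and multiple 32 -> (512, 672), matching the
# [1, 3, 512, 672] shape asserted in test_keep_aspect_ratio.
print(dpt_style_output_size(489, 640))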
# Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Qwen3MoE model."""

import unittest

import pytest

from transformers import AutoTokenizer, Qwen3MoeConfig, is_torch_available, set_seed
from transformers.testing_utils import (
    cleanup,
    require_bitsandbytes,
    require_flash_attn,
    require_torch,
    require_torch_gpu,
    require_torch_large_accelerator,
    require_torch_multi_accelerator,
    slow,
    torch_device,
)


if is_torch_available():
    import torch

    from transformers import (
        Qwen3ForQuestionAnswering,
        Qwen3MoeForCausalLM,
        Qwen3MoeForQuestionAnswering,
        Qwen3MoeForSequenceClassification,
        Qwen3MoeForTokenClassification,
        Qwen3MoeModel,
    )

from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester


class Qwen3MoeModelTester(CausalLMModelTester):
    config_class = Qwen3MoeConfig
    if is_torch_available():
        base_model_class = Qwen3MoeModel
        causal_lm_class = Qwen3MoeForCausalLM
        sequence_class = Qwen3MoeForSequenceClassification
        token_class = Qwen3MoeForTokenClassification
        question_answering_class = Qwen3MoeForQuestionAnswering


@require_torch
class Qwen3MoeModelTest(CausalLMModelTest, unittest.TestCase):
    all_model_classes = (
        (
            Qwen3MoeModel,
            Qwen3MoeForCausalLM,
            Qwen3MoeForSequenceClassification,
            Qwen3MoeForTokenClassification,
            Qwen3MoeForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": Qwen3MoeModel,
            "text-classification": Qwen3MoeForSequenceClassification,
            "token-classification": Qwen3MoeForTokenClassification,
            "text-generation": Qwen3MoeForCausalLM,
            "question-answering": Qwen3ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    test_all_params_have_gradient = False
    model_tester_class = Qwen3MoeModelTester

    # TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
    def is_pipeline_test_to_skip(
        self,
        pipeline_test_case_name,
        config_class,
        model_architecture,
        tokenizer_name,
        image_processor_name,
        feature_extractor_name,
        processor_name,
    ):
        return True

    @require_flash_attn
    @require_torch_gpu
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
        self.skipTest(reason="Qwen3Moe flash attention does not support right padding")

    # Ignore copy
    def test_load_balancing_loss(self):
        r"""
        Let's make sure we can actually compute the loss and do a backward on it.
        """
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.num_experts = 8
        config.expert_interval = 2
        config.output_router_logits = True
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = Qwen3MoeForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask)
        self.assertEqual(result.router_logits[0].shape, (91, config.num_experts))
        torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)

        # First, we make sure that adding padding tokens doesn't change the loss
        # loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
        pad_length = 1000
        # Add padding tokens (assume that pad_token_id=1) to input_ids
        padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
        padded_input_ids = torch.cat((padding_block, input_ids), dim=1)  # this is to simulate padding to the left
        padded_attention_mask = padded_input_ids.ne(1).to(torch_device)

        padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
        torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)

        # We make sure that the loss of including padding tokens != the loss without padding tokens
        # if attention_mask=None --> we don't exclude padding tokens
        include_padding_result = model(padded_input_ids, attention_mask=None)

        # This is to mimic torch.testing.assert_not_close
        self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())


# Run on runners with larger accelerators (for example A10 instead of T4) with a lot of CPU RAM (e.g. g5-12xlarge)
@require_torch_multi_accelerator
@require_torch_large_accelerator
@require_torch
class Qwen3MoeIntegrationTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model = None

    @classmethod
    def tearDownClass(cls):
        del cls.model
        cleanup(torch_device, gc_collect=True)

    def tearDown(self):
        cleanup(torch_device, gc_collect=True)

    @classmethod
    def get_model(cls):
        if cls.model is None:
            cls.model = Qwen3MoeForCausalLM.from_pretrained(
                "Qwen/Qwen3-30B-A3B-Base", device_map="auto", load_in_4bit=True
            )
        return cls.model

    @slow
    def test_model_15b_a2b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = self.get_model()
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        with torch.no_grad():
            out = model(input_ids).logits.float().cpu()
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[0.3244, 0.4406, 9.0972, 7.3597, 4.9985, 8.0314, 8.2148, 9.2134]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
        # slicing logits[0, 0, 0:30]
        EXPECTED_SLICE = torch.tensor([6.8984, 4.8633, 4.7734, 4.5898, 2.5664, 2.9902, 4.8828, 5.9414, 4.6250, 3.0840, 5.1602, 6.0117, 4.9453, 5.3008, 3.3145, 11.3906, 12.8359, 12.4844, 11.2891, 11.0547, 11.0391, 10.3359, 10.3438, 10.2578, 10.7969, 5.9688, 3.7676, 5.5938, 5.3633, 5.8203])  # fmt: skip
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)

    @slow
    def test_model_15b_a2b_generation(self):
        EXPECTED_TEXT_COMPLETION = "To be or not to be: the role of the cell cycle in the regulation of apoptosis.\nThe cell cycle is a highly"
        prompt = "To be or not to"
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)
        model = self.get_model()
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @require_bitsandbytes
    @slow
    @require_flash_attn
    @pytest.mark.flash_attn_test
    def test_model_15b_a2b_long_prompt(self):
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = Qwen3MoeForCausalLM.from_pretrained(
            "Qwen/Qwen3-30B-A3B-Base",
            device_map="auto",
            load_in_4bit=True,
            attn_implementation="flash_attention_2",
        )
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

    @slow
    def test_model_15b_a2b_long_prompt_sdpa(self):
        EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
        # An input with 4097 tokens that is above the size of the sliding window
        input_ids = [1] + [306, 338] * 2048
        model = self.get_model()
        input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
        generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
        self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())

        EXPECTED_TEXT_COMPLETION = "To be or not to be: the role of the cell cycle in the regulation of apoptosis.\nThe cell cycle is a highly"
        prompt = "To be or not to"
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)

        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    @slow
    def test_speculative_generation(self):
        EXPECTED_TEXT_COMPLETION = (
            "To be or not to be: the role of the liver in the pathogenesis of obesity and type 2 diabetes.\nThe"
        )
        prompt = "To be or not to"
        tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)
        model = self.get_model()
        assistant_model = model
        input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)

        # greedy generation outputs
        set_seed(0)
        generated_ids = model.generate(
            input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model
        )
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
transformers/tests/models/qwen3_moe/test_modeling_qwen3_moe.py/0
{ "file_path": "transformers/tests/models/qwen3_moe/test_modeling_qwen3_moe.py", "repo_id": "transformers", "token_count": 4644 }
585
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch ResNet model."""

import unittest

from transformers import ResNetConfig
from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ResNetBackbone, ResNetForImageClassification, ResNetModel


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ResNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = ResNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ResNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ResNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ResNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ResNetModel,
            ResNetForImageClassification,
            ResNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"image-feature-extraction": ResNetModel, "image-classification": ResNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_torch_exportable = True

    def setUp(self):
        self.model_tester = ResNetModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=ResNetConfig,
            has_text_modality=False,
            common_properties=["num_channels", "hidden_sizes"],
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_get_set_embeddings(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="ResNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model_name = "microsoft/resnet-50"
        model = ResNetModel.from_pretrained(model_name)
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expectations = Expectations(
            {
                (None, None): [-11.1069, -9.7877, -8.3777],
                ("cuda", 8): [-11.1069, -9.7877, -8.3777],
            }
        )
        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)


@require_torch
class ResNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ResNetBackbone,) if is_torch_available() else ()
    has_attentions = False
    config_class = ResNetConfig

    def setUp(self):
        self.model_tester = ResNetModelTester(self)
transformers/tests/models/resnet/test_modeling_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_resnet.py", "repo_id": "transformers", "token_count": 5028 }
586